Merge remote-tracking branch 'es/7.x' into enrich-7.x
commit eb8e03bc8b
@@ -18,10 +18,10 @@
  */
 package org.elasticsearch.plugin.noop.action.bulk;
 
-import org.elasticsearch.action.Action;
+import org.elasticsearch.action.StreamableResponseAction;
 import org.elasticsearch.action.bulk.BulkResponse;
 
-public class NoopBulkAction extends Action<BulkResponse> {
+public class NoopBulkAction extends StreamableResponseAction<BulkResponse> {
     public static final String NAME = "mock:data/write/bulk";
 
     public static final NoopBulkAction INSTANCE = new NoopBulkAction();
@@ -30,11 +30,6 @@ public class NoopSearchAction extends Action<SearchResponse> {
         super(NAME);
     }
 
-    @Override
-    public SearchResponse newResponse() {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
-
     @Override
     public Writeable.Reader<SearchResponse> getResponseReader() {
         return SearchResponse::new;
@@ -45,7 +45,6 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
 public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
 
     private static final ParseField TIME_ZONE = new ParseField("time_zone");
-    private static final ParseField FORMAT = new ParseField("format");
 
     // From DateHistogramAggregationBuilder in core, transplanted and modified to a set
     // so we don't need to import a dependency on the class
@@ -195,8 +194,7 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
         }
 
         ZoneId zoneId = (ZoneId) args[3];
-        String format = (String) args[4];
-        return new DateHistogramGroupSource(field, interval, format, zoneId);
+        return new DateHistogramGroupSource(field, interval, zoneId);
     });
 
     static {
@@ -212,8 +210,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
                 return ZoneOffset.ofHours(p.intValue());
             }
         }, TIME_ZONE, ObjectParser.ValueType.LONG);
-
-        PARSER.declareString(optionalConstructorArg(), FORMAT);
     }
 
     public static DateHistogramGroupSource fromXContent(final XContentParser parser) {
@@ -221,13 +217,11 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
     }
 
     private final Interval interval;
-    private final String format;
     private final ZoneId timeZone;
 
-    DateHistogramGroupSource(String field, Interval interval, String format, ZoneId timeZone) {
+    DateHistogramGroupSource(String field, Interval interval, ZoneId timeZone) {
         super(field);
         this.interval = interval;
-        this.format = format;
         this.timeZone = timeZone;
     }
 
@@ -240,10 +234,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
         return interval;
     }
 
-    public String getFormat() {
-        return format;
-    }
-
     public ZoneId getTimeZone() {
         return timeZone;
     }
@@ -258,9 +248,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
         if (timeZone != null) {
             builder.field(TIME_ZONE.getPreferredName(), timeZone.toString());
         }
-        if (format != null) {
-            builder.field(FORMAT.getPreferredName(), format);
-        }
         builder.endObject();
         return builder;
     }
@@ -279,13 +266,12 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
 
         return Objects.equals(this.field, that.field) &&
             Objects.equals(this.interval, that.interval) &&
-            Objects.equals(this.timeZone, that.timeZone) &&
-            Objects.equals(this.format, that.format);
+            Objects.equals(this.timeZone, that.timeZone);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(field, interval, timeZone, format);
+        return Objects.hash(field, interval, timeZone);
     }
 
     public static Builder builder() {
@@ -296,7 +282,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
 
         private String field;
         private Interval interval;
-        private String format;
         private ZoneId timeZone;
 
         /**
@@ -319,16 +304,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
             return this;
         }
 
-        /**
-         * Set the optional String formatting for the time interval.
-         * @param format The format of the output for the time interval key
-         * @return The {@link Builder} with the format set.
-         */
-        public Builder setFormat(String format) {
-            this.format = format;
-            return this;
-        }
-
         /**
         * Sets the time zone to use for this aggregation
         * @param timeZone The zoneId for the timeZone
@@ -340,7 +315,7 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject {
        }
 
        public DateHistogramGroupSource build() {
-           return new DateHistogramGroupSource(field, interval, format, timeZone);
+           return new DateHistogramGroupSource(field, interval, timeZone);
        }
    }
 }
@@ -22,6 +22,7 @@ package org.elasticsearch.client.ml;
 import org.elasticsearch.client.Validatable;
 import org.elasticsearch.client.ValidationException;
 import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfig;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
@@ -67,4 +68,9 @@ public class PutDataFrameAnalyticsRequest implements ToXContentObject, Validatable {
     public int hashCode() {
         return Objects.hash(config);
     }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
 }
@@ -19,11 +19,14 @@
 
 package org.elasticsearch.client.ml.dataframe;
 
+import org.elasticsearch.Version;
+import org.elasticsearch.client.dataframe.transforms.util.TimeUtil;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -31,11 +34,9 @@ import org.elasticsearch.common.xcontent.XContentParserUtils;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
 import java.io.IOException;
+import java.time.Instant;
 import java.util.Objects;
 
-import static org.elasticsearch.common.xcontent.ObjectParser.ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING;
-import static org.elasticsearch.common.xcontent.ObjectParser.ValueType.VALUE;
-
 public class DataFrameAnalyticsConfig implements ToXContentObject {
 
     public static DataFrameAnalyticsConfig fromXContent(XContentParser parser) {
@@ -52,6 +53,8 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
     private static final ParseField ANALYSIS = new ParseField("analysis");
     private static final ParseField ANALYZED_FIELDS = new ParseField("analyzed_fields");
     private static final ParseField MODEL_MEMORY_LIMIT = new ParseField("model_memory_limit");
+    private static final ParseField CREATE_TIME = new ParseField("create_time");
+    private static final ParseField VERSION = new ParseField("version");
 
     private static ObjectParser<Builder, Void> PARSER = new ObjectParser<>("data_frame_analytics_config", true, Builder::new);
 
@@ -63,9 +66,24 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
         PARSER.declareField(Builder::setAnalyzedFields,
             (p, c) -> FetchSourceContext.fromXContent(p),
             ANALYZED_FIELDS,
-            OBJECT_ARRAY_BOOLEAN_OR_STRING);
+            ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING);
         PARSER.declareField(Builder::setModelMemoryLimit,
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()), MODEL_MEMORY_LIMIT, VALUE);
+            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()),
+            MODEL_MEMORY_LIMIT,
+            ValueType.VALUE);
+        PARSER.declareField(Builder::setCreateTime,
+            p -> TimeUtil.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()),
+            CREATE_TIME,
+            ValueType.VALUE);
+        PARSER.declareField(Builder::setVersion,
+            p -> {
+                if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
+                    return Version.fromString(p.text());
+                }
+                throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
+            },
+            VERSION,
+            ValueType.STRING);
     }
 
     private static DataFrameAnalysis parseAnalysis(XContentParser parser) throws IOException {
@@ -82,15 +100,20 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
     private final DataFrameAnalysis analysis;
     private final FetchSourceContext analyzedFields;
     private final ByteSizeValue modelMemoryLimit;
+    private final Instant createTime;
+    private final Version version;
 
     private DataFrameAnalyticsConfig(String id, DataFrameAnalyticsSource source, DataFrameAnalyticsDest dest, DataFrameAnalysis analysis,
-                                     @Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit) {
+                                     @Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit,
+                                     @Nullable Instant createTime, @Nullable Version version) {
         this.id = Objects.requireNonNull(id);
         this.source = Objects.requireNonNull(source);
         this.dest = Objects.requireNonNull(dest);
         this.analysis = Objects.requireNonNull(analysis);
         this.analyzedFields = analyzedFields;
         this.modelMemoryLimit = modelMemoryLimit;
+        this.createTime = createTime == null ? null : Instant.ofEpochMilli(createTime.toEpochMilli());
+        this.version = version;
     }
 
     public String getId() {
@@ -117,6 +140,14 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
         return modelMemoryLimit;
     }
 
+    public Instant getCreateTime() {
+        return createTime;
+    }
+
+    public Version getVersion() {
+        return version;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
@@ -132,6 +163,12 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
         if (modelMemoryLimit != null) {
             builder.field(MODEL_MEMORY_LIMIT.getPreferredName(), modelMemoryLimit.getStringRep());
         }
+        if (createTime != null) {
+            builder.timeField(CREATE_TIME.getPreferredName(), CREATE_TIME.getPreferredName() + "_string", createTime.toEpochMilli());
+        }
+        if (version != null) {
+            builder.field(VERSION.getPreferredName(), version);
+        }
         builder.endObject();
         return builder;
     }
@@ -147,12 +184,14 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
             && Objects.equals(dest, other.dest)
             && Objects.equals(analysis, other.analysis)
             && Objects.equals(analyzedFields, other.analyzedFields)
-            && Objects.equals(modelMemoryLimit, other.modelMemoryLimit);
+            && Objects.equals(modelMemoryLimit, other.modelMemoryLimit)
+            && Objects.equals(createTime, other.createTime)
+            && Objects.equals(version, other.version);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(id, source, dest, analysis, analyzedFields, getModelMemoryLimit());
+        return Objects.hash(id, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version);
     }
 
     @Override
@@ -168,6 +207,8 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
         private DataFrameAnalysis analysis;
         private FetchSourceContext analyzedFields;
         private ByteSizeValue modelMemoryLimit;
+        private Instant createTime;
+        private Version version;
 
         private Builder() {}
 
@@ -201,8 +242,18 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
             return this;
         }
 
+        public Builder setCreateTime(Instant createTime) {
+            this.createTime = createTime;
+            return this;
+        }
+
+        public Builder setVersion(Version version) {
+            this.version = version;
+            return this;
+        }
+
         public DataFrameAnalyticsConfig build() {
-            return new DataFrameAnalyticsConfig(id, source, dest, analysis, analyzedFields, modelMemoryLimit);
+            return new DataFrameAnalyticsConfig(id, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version);
         }
     }
 }
@@ -28,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Objects;
 
 public class DataFrameAnalyticsSource implements ToXContentObject {
@@ -46,19 +48,19 @@ public class DataFrameAnalyticsSource implements ToXContentObject {
     private static ObjectParser<Builder, Void> PARSER = new ObjectParser<>("data_frame_analytics_source", true, Builder::new);
 
     static {
-        PARSER.declareString(Builder::setIndex, INDEX);
+        PARSER.declareStringArray(Builder::setIndex, INDEX);
         PARSER.declareObject(Builder::setQueryConfig, (p, c) -> QueryConfig.fromXContent(p), QUERY);
     }
 
-    private final String index;
+    private final String[] index;
     private final QueryConfig queryConfig;
 
-    private DataFrameAnalyticsSource(String index, @Nullable QueryConfig queryConfig) {
+    private DataFrameAnalyticsSource(String[] index, @Nullable QueryConfig queryConfig) {
         this.index = Objects.requireNonNull(index);
         this.queryConfig = queryConfig;
     }
 
-    public String getIndex() {
+    public String[] getIndex() {
         return index;
     }
 
@@ -83,13 +85,13 @@ public class DataFrameAnalyticsSource implements ToXContentObject {
         if (o == null || getClass() != o.getClass()) return false;
 
         DataFrameAnalyticsSource other = (DataFrameAnalyticsSource) o;
-        return Objects.equals(index, other.index)
+        return Arrays.equals(index, other.index)
             && Objects.equals(queryConfig, other.queryConfig);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(index, queryConfig);
+        return Objects.hash(Arrays.asList(index), queryConfig);
     }
 
     @Override
@@ -99,16 +101,21 @@ public class DataFrameAnalyticsSource implements ToXContentObject {
 
     public static class Builder {
 
-        private String index;
+        private String[] index;
         private QueryConfig queryConfig;
 
         private Builder() {}
 
-        public Builder setIndex(String index) {
+        public Builder setIndex(String... index) {
             this.index = index;
             return this;
         }
 
+        public Builder setIndex(List<String> index) {
+            this.index = index.toArray(new String[0]);
+            return this;
+        }
+
         public Builder setQueryConfig(QueryConfig queryConfig) {
             this.queryConfig = queryConfig;
             return this;
@@ -47,8 +47,7 @@ public class OutlierDetection implements DataFrameAnalysis {
     public static final ParseField NAME = new ParseField("outlier_detection");
     static final ParseField N_NEIGHBORS = new ParseField("n_neighbors");
     static final ParseField METHOD = new ParseField("method");
-    public static final ParseField MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE =
-        new ParseField("minimum_score_to_write_feature_influence");
+    public static final ParseField FEATURE_INFLUENCE_THRESHOLD = new ParseField("feature_influence_threshold");
 
     private static ObjectParser<Builder, Void> PARSER = new ObjectParser<>(NAME.getPreferredName(), true, Builder::new);
 
@@ -60,23 +59,23 @@ public class OutlierDetection implements DataFrameAnalysis {
             }
             throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
         }, METHOD, ObjectParser.ValueType.STRING);
-        PARSER.declareDouble(Builder::setMinScoreToWriteFeatureInfluence, MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE);
+        PARSER.declareDouble(Builder::setFeatureInfluenceThreshold, FEATURE_INFLUENCE_THRESHOLD);
     }
 
     private final Integer nNeighbors;
     private final Method method;
-    private final Double minScoreToWriteFeatureInfluence;
+    private final Double featureInfluenceThreshold;
 
     /**
      * Constructs the outlier detection configuration
      * @param nNeighbors The number of neighbors. Leave unspecified for dynamic detection.
      * @param method The method. Leave unspecified for a dynamic mixture of methods.
-     * @param minScoreToWriteFeatureInfluence The min outlier score required to calculate feature influence. Defaults to 0.1.
+     * @param featureInfluenceThreshold The min outlier score required to calculate feature influence. Defaults to 0.1.
      */
-    private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double minScoreToWriteFeatureInfluence) {
+    private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double featureInfluenceThreshold) {
         this.nNeighbors = nNeighbors;
         this.method = method;
-        this.minScoreToWriteFeatureInfluence = minScoreToWriteFeatureInfluence;
+        this.featureInfluenceThreshold = featureInfluenceThreshold;
     }
 
     @Override
@@ -92,8 +91,8 @@ public class OutlierDetection implements DataFrameAnalysis {
         return method;
     }
 
-    public Double getMinScoreToWriteFeatureInfluence() {
-        return minScoreToWriteFeatureInfluence;
+    public Double getFeatureInfluenceThreshold() {
+        return featureInfluenceThreshold;
     }
 
     @Override
@@ -105,8 +104,8 @@ public class OutlierDetection implements DataFrameAnalysis {
         if (method != null) {
             builder.field(METHOD.getPreferredName(), method);
         }
-        if (minScoreToWriteFeatureInfluence != null) {
-            builder.field(MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName(), minScoreToWriteFeatureInfluence);
+        if (featureInfluenceThreshold != null) {
+            builder.field(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold);
         }
         builder.endObject();
         return builder;
@@ -120,12 +119,12 @@ public class OutlierDetection implements DataFrameAnalysis {
         OutlierDetection other = (OutlierDetection) o;
         return Objects.equals(nNeighbors, other.nNeighbors)
             && Objects.equals(method, other.method)
-            && Objects.equals(minScoreToWriteFeatureInfluence, other.minScoreToWriteFeatureInfluence);
+            && Objects.equals(featureInfluenceThreshold, other.featureInfluenceThreshold);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(nNeighbors, method, minScoreToWriteFeatureInfluence);
+        return Objects.hash(nNeighbors, method, featureInfluenceThreshold);
     }
 
     @Override
@@ -150,7 +149,7 @@ public class OutlierDetection implements DataFrameAnalysis {
 
         private Integer nNeighbors;
         private Method method;
-        private Double minScoreToWriteFeatureInfluence;
+        private Double featureInfluenceThreshold;
 
         private Builder() {}
 
@@ -164,13 +163,13 @@ public class OutlierDetection implements DataFrameAnalysis {
             return this;
         }
 
-        public Builder setMinScoreToWriteFeatureInfluence(Double minScoreToWriteFeatureInfluence) {
-            this.minScoreToWriteFeatureInfluence = minScoreToWriteFeatureInfluence;
+        public Builder setFeatureInfluenceThreshold(Double featureInfluenceThreshold) {
+            this.featureInfluenceThreshold = featureInfluenceThreshold;
             return this;
         }
 
         public OutlierDetection build() {
-            return new OutlierDetection(nNeighbors, method, minScoreToWriteFeatureInfluence);
+            return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold);
         }
     }
 }
@@ -45,7 +45,7 @@ public abstract class AbstractResponseTestCase<S extends ToXContent, C> extends ESTestCase {
         final S serverTestInstance = createServerTestInstance();
 
         final XContentType xContentType = randomFrom(XContentType.values());
-        final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
+        final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, getParams(), randomBoolean());
 
         final XContent xContent = XContentFactory.xContent(xContentType);
         final XContentParser parser = xContent.createParser(
@@ -62,4 +62,8 @@ public abstract class AbstractResponseTestCase<S extends ToXContent, C> extends ESTestCase {
 
     protected abstract void assertInstances(S serverTestInstance, C clientInstance);
 
+    protected ToXContent.Params getParams() {
+        return ToXContent.EMPTY_PARAMS;
+    }
+
 }
@@ -258,8 +258,10 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
         GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id),
             client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync);
         assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1));
-        IndexerState indexerState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState();
-        assertThat(indexerState, is(oneOf(IndexerState.STARTED, IndexerState.INDEXING)));
+        DataFrameTransformTaskState taskState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getTaskState();
+
+        // Since we are non-continuous, the transform could auto-stop between being started earlier and us gathering the statistics
+        assertThat(taskState, is(oneOf(DataFrameTransformTaskState.STARTED, DataFrameTransformTaskState.STOPPED)));
 
         StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null);
         StopDataFrameTransformResponse stopResponse =
@@ -267,6 +269,12 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
         assertTrue(stopResponse.isAcknowledged());
         assertThat(stopResponse.getNodeFailures(), empty());
         assertThat(stopResponse.getTaskFailures(), empty());
+
+        // Calling stop with wait_for_completion assures that we will be in the `STOPPED` state for the transform task
+        statsResponse = execute(new GetDataFrameTransformStatsRequest(id),
+            client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync);
+        taskState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getTaskState();
+        assertThat(taskState, is(DataFrameTransformTaskState.STOPPED));
     }
 
     public void testPreview() throws IOException {
@@ -730,8 +730,8 @@ public class RestHighLevelClientTests extends ESTestCase {
             "indices.exists_type",
             "indices.get_upgrade",
             "indices.put_alias",
-            "scripts_painless_execute",
-            "render_search_template"
+            "render_search_template",
+            "scripts_painless_execute"
         };
         //These API are not required for high-level client feature completeness
         String[] notRequiredApi = new String[] {
@@ -39,7 +39,6 @@ public class DateHistogramGroupSourceTests extends AbstractXContentTestCase<DateHistogramGroupSource> {
         String field = randomAlphaOfLengthBetween(1, 20);
         return new DateHistogramGroupSource(field,
             randomDateHistogramInterval(),
-            randomBoolean() ? randomAlphaOfLength(10) : null,
             randomBoolean() ? randomZone() : null);
     }
 
@@ -44,9 +44,6 @@ public class DateHistogramGroupSourceTests extends AbstractResponseTestCase<
         if (randomBoolean()) {
             dateHistogramGroupSource.setTimeZone(randomZone());
         }
-        if (randomBoolean()) {
-            dateHistogramGroupSource.setFormat(randomAlphaOfLength(10));
-        }
         return dateHistogramGroupSource;
     }
 
@@ -64,7 +61,6 @@ public class DateHistogramGroupSourceTests extends AbstractResponseTestCase<
     protected void assertInstances(DateHistogramGroupSource serverTestInstance,
                                    org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource clientInstance) {
         assertThat(serverTestInstance.getField(), equalTo(clientInstance.getField()));
-        assertThat(serverTestInstance.getFormat(), equalTo(clientInstance.getFormat()));
         assertSameInterval(serverTestInstance.getInterval(), clientInstance.getInterval());
         assertThat(serverTestInstance.getTimeZone(), equalTo(clientInstance.getTimeZone()));
         assertThat(serverTestInstance.getType().name(), equalTo(clientInstance.getType().name()));
@@ -2802,7 +2802,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testGetDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -2851,7 +2851,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testGetDataFrameAnalyticsStats() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -2901,7 +2901,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testPutDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         {
@@ -2994,7 +2994,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testDeleteDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
 
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -3044,9 +3044,9 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testStartDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
         highLevelClient().index(
-            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()).source(XContentType.JSON, "total", 10000)
+            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]).source(XContentType.JSON, "total", 10000)
                 .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT);
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -3101,9 +3101,9 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
     }
 
     public void testStopDataFrameAnalytics() throws Exception {
-        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex());
+        createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]);
         highLevelClient().index(
-            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()).source(XContentType.JSON, "total", 10000)
+            new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]).source(XContentType.JSON, "total", 10000)
                 .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT);
         RestHighLevelClient client = highLevelClient();
         client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT);
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.client.ml.dataframe;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
@@ -29,6 +30,7 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.test.AbstractXContentTestCase;
 
 import java.io.IOException;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -54,6 +56,12 @@ public class DataFrameAnalyticsConfigTests extends AbstractXContentTestCase<DataFrameAnalyticsConfig> {
         if (randomBoolean()) {
             builder.setModelMemoryLimit(new ByteSizeValue(randomIntBetween(1, 16), randomFrom(ByteSizeUnit.MB, ByteSizeUnit.GB)));
         }
+        if (randomBoolean()) {
+            builder.setCreateTime(Instant.now());
+        }
+        if (randomBoolean()) {
+            builder.setVersion(Version.CURRENT);
+        }
         return builder.build();
     }
 
@@ -36,7 +36,7 @@ public class DataFrameAnalyticsSourceTests extends AbstractXContentTestCase<DataFrameAnalyticsSource> {
 
     public static DataFrameAnalyticsSource randomSourceConfig() {
         return DataFrameAnalyticsSource.builder()
-            .setIndex(randomAlphaOfLengthBetween(1, 10))
+            .setIndex(generateRandomStringArray(10, 10, false, false))
             .setQueryConfig(randomBoolean() ? null : randomQueryConfig())
             .build();
     }
@@ -33,7 +33,7 @@ public class OutlierDetectionTests extends AbstractXContentTestCase<OutlierDetection> {
         return OutlierDetection.builder()
             .setNNeighbors(randomBoolean() ? null : randomIntBetween(1, 20))
             .setMethod(randomBoolean() ? null : randomFrom(OutlierDetection.Method.values()))
-            .setMinScoreToWriteFeatureInfluence(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, true))
+            .setFeatureInfluenceThreshold(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, true))
             .build();
     }
 
@@ -56,7 +56,7 @@ public class OutlierDetectionTests extends AbstractXContentTestCase<OutlierDetection> {
         OutlierDetection outlierDetection = OutlierDetection.createDefault();
         assertNull(outlierDetection.getNNeighbors());
         assertNull(outlierDetection.getMethod());
-        assertNull(outlierDetection.getMinScoreToWriteFeatureInfluence());
+        assertNull(outlierDetection.getFeatureInfluenceThreshold());
     }
 
     public void testGetParams_GivenExplicitValues() {
@@ -64,10 +64,10 @@ public class OutlierDetectionTests extends AbstractXContentTestCase<OutlierDetection> {
             OutlierDetection.builder()
                 .setNNeighbors(42)
                 .setMethod(OutlierDetection.Method.LDOF)
-                .setMinScoreToWriteFeatureInfluence(0.5)
+                .setFeatureInfluenceThreshold(0.5)
                 .build();
         assertThat(outlierDetection.getNNeighbors(), equalTo(42));
         assertThat(outlierDetection.getMethod(), equalTo(OutlierDetection.Method.LDOF));
-        assertThat(outlierDetection.getMinScoreToWriteFeatureInfluence(), closeTo(0.5, 1E-9));
+        assertThat(outlierDetection.getFeatureInfluenceThreshold(), closeTo(0.5, 1E-9));
     }
 }
@@ -1,115 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.client.watcher;
-
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.xcontent.ObjectPath;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
-import org.elasticsearch.test.ESTestCase;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-import java.util.Map;
-
-import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
-import static org.hamcrest.Matchers.is;
-
-public class ExecuteWatchResponseTests extends ESTestCase {
-
-    public static final String WATCH_ID_VALUE = "my_watch";
-    public static final String NODE_VALUE = "my_node";
-    public static final String TRIGGER_TYPE_VALUE = "manual";
-    public static final String STATE_VALUE = "executed";
-    public static final String STATE_KEY = "state";
-    public static final String TRIGGER_EVENT_KEY = "trigger_event";
-    public static final String TRIGGER_EVENT_TYPE_KEY = "type";
-    public static final String MESSAGES_KEY = "messages";
-    public static final String NODE_KEY = "node";
-    public static final String WATCH_ID_KEY = "watch_id";
-
-    public void testFromXContent() throws IOException {
-        xContentTester(this::createParser,
-            ExecuteWatchResponseTests::createTestInstance,
-            this::toXContent,
-            ExecuteWatchResponse::fromXContent)
-            .supportsUnknownFields(true)
-            .assertEqualsConsumer(this::assertEqualInstances)
-            .assertToXContentEquivalence(false)
-            .test();
-    }
-
-    private void assertEqualInstances(ExecuteWatchResponse expected, ExecuteWatchResponse actual) {
-        assertThat(expected.getRecordId(), is(actual.getRecordId()));
-
-        // This may have extra json, so lets just assume that if all of the original fields from the creation are there, then its equal
-        // This is the same code that is in createTestInstance in this class.
-        Map<String, Object> actualMap = actual.getRecordAsMap();
-        assertThat(ObjectPath.eval(WATCH_ID_KEY, actualMap), is(WATCH_ID_VALUE));
-        assertThat(ObjectPath.eval(NODE_KEY, actualMap), is(NODE_VALUE));
-        List<Object> messages = ObjectPath.eval(MESSAGES_KEY, actualMap);
-        assertThat(messages.size(), is(0));
-        assertThat(ObjectPath.eval(TRIGGER_EVENT_KEY + "." + TRIGGER_EVENT_TYPE_KEY, actualMap), is(TRIGGER_TYPE_VALUE));
-        assertThat(ObjectPath.eval(STATE_KEY, actualMap), is(STATE_VALUE));
-    }
-
-    private XContentBuilder toXContent(BytesReference bytes, XContentBuilder builder) throws IOException {
-        // EMPTY is safe here because we never use namedObject
-        try (InputStream stream = bytes.streamInput();
-             XContentParser parser = createParser(JsonXContent.jsonXContent, stream)) {
-            parser.nextToken();
-            builder.generator().copyCurrentStructure(parser);
-            return builder;
-        }
-    }
-
-    private XContentBuilder toXContent(ExecuteWatchResponse response, XContentBuilder builder) throws IOException {
-        builder.startObject();
-        builder.field("_id", response.getRecordId());
-        builder.field("watch_record");
-        toXContent(response.getRecord(), builder);
-        return builder.endObject();
-    }
-
-    private static ExecuteWatchResponse createTestInstance() {
-        String id = "my_watch_0-2015-06-02T23:17:55.124Z";
-        try {
-            XContentBuilder builder = XContentFactory.jsonBuilder();
-            builder.startObject();
-            builder.field(WATCH_ID_KEY, WATCH_ID_VALUE);
-            builder.field(NODE_KEY, NODE_VALUE);
-            builder.startArray(MESSAGES_KEY);
-            builder.endArray();
-            builder.startObject(TRIGGER_EVENT_KEY);
-            builder.field(TRIGGER_EVENT_TYPE_KEY, TRIGGER_TYPE_VALUE);
-            builder.endObject();
-            builder.field(STATE_KEY, STATE_VALUE);
-            builder.endObject();
-            BytesReference bytes = BytesReference.bytes(builder);
-            return new ExecuteWatchResponse(id, bytes);
-        }
-        catch (IOException e) {
-            throw new AssertionError(e);
-        }
-    }
-}
@@ -19,16 +19,12 @@
 package org.elasticsearch.client.watcher;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.client.AbstractResponseTestCase;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.xcontent.DeprecationHandler;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;
 import org.elasticsearch.xpack.core.watcher.actions.ActionStatus;
 import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
 import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
@@ -36,7 +32,6 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse;
 import org.elasticsearch.xpack.core.watcher.watch.WatchStatus;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.time.Clock;
 import java.time.Instant;
 import java.time.ZoneOffset;
@@ -44,65 +39,14 @@ import java.time.ZonedDateTime;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.function.Predicate;
 
-public class GetWatchResponseTests extends
-    AbstractHlrcStreamableXContentTestCase<GetWatchResponse, org.elasticsearch.client.watcher.GetWatchResponse> {
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
 
-    private static final String[] SHUFFLE_FIELDS_EXCEPTION = new String[] { "watch" };
-
-    @Override
-    protected String[] getShuffleFieldsExceptions() {
-        return SHUFFLE_FIELDS_EXCEPTION;
-    }
-
-    @Override
-    protected ToXContent.Params getToXContentParams() {
-        return new ToXContent.MapParams(Collections.singletonMap("hide_headers", "false"));
-    }
-
-    @Override
-    protected Predicate<String> getRandomFieldsExcludeFilter() {
-        return f -> f.contains("watch") || f.contains("actions") || f.contains("headers");
-    }
-
-    @Override
-    protected void assertEqualInstances(GetWatchResponse expectedInstance, GetWatchResponse newInstance) {
-        if (expectedInstance.isFound() &&
-            expectedInstance.getSource().getContentType() != newInstance.getSource().getContentType()) {
-            /**
-             * The {@link GetWatchResponse#getContentType()} depends on the content type that
-             * was used to serialize the main object so we use the same content type than the
-             * <code>expectedInstance</code> to translate the watch of the <code>newInstance</code>.
-             */
-            XContent from = XContentFactory.xContent(newInstance.getSource().getContentType());
-            XContent to = XContentFactory.xContent(expectedInstance.getSource().getContentType());
-            final BytesReference newSource;
-            // It is safe to use EMPTY here because this never uses namedObject
-            try (InputStream stream = newInstance.getSource().getBytes().streamInput();
-                 XContentParser parser = XContentFactory.xContent(from.type()).createParser(NamedXContentRegistry.EMPTY,
-                     DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) {
-                parser.nextToken();
-                XContentBuilder builder = XContentFactory.contentBuilder(to.type());
-                builder.copyCurrentStructure(parser);
-                newSource = BytesReference.bytes(builder);
-            } catch (IOException e) {
-                throw new AssertionError(e);
-            }
-            newInstance = new GetWatchResponse(newInstance.getId(), newInstance.getVersion(),
-                newInstance.getSeqNo(), newInstance.getPrimaryTerm(),
-                newInstance.getStatus(), new XContentSource(newSource, expectedInstance.getSource().getContentType()));
-        }
-        super.assertEqualInstances(expectedInstance, newInstance);
-    }
-
-    @Override
-    protected GetWatchResponse createBlankInstance() {
-        return new GetWatchResponse();
-    }
+public class GetWatchResponseTests extends AbstractResponseTestCase<GetWatchResponse, org.elasticsearch.client.watcher.GetWatchResponse> {
 
     @Override
-    protected GetWatchResponse createTestInstance() {
+    protected GetWatchResponse createServerTestInstance() {
         String id = randomAlphaOfLength(10);
         if (LuceneTestCase.rarely()) {
             return new GetWatchResponse(id);
@@ -115,6 +59,34 @@ public class GetWatchResponseTests extends
         return new GetWatchResponse(id, version, seqNo, primaryTerm, status, new XContentSource(source, XContentType.JSON));
     }
 
+    @Override
+    protected org.elasticsearch.client.watcher.GetWatchResponse doParseToClientInstance(XContentParser parser) throws IOException {
+        return org.elasticsearch.client.watcher.GetWatchResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected void assertInstances(GetWatchResponse serverTestInstance, org.elasticsearch.client.watcher.GetWatchResponse clientInstance) {
+        assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId()));
+        assertThat(clientInstance.getSeqNo(), equalTo(serverTestInstance.getSeqNo()));
+        assertThat(clientInstance.getPrimaryTerm(), equalTo(serverTestInstance.getPrimaryTerm()));
+        assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion()));
+        if (serverTestInstance.getStatus() != null) {
+            assertThat(convertWatchStatus(clientInstance.getStatus()), equalTo(serverTestInstance.getStatus()));
+        } else {
+            assertThat(clientInstance.getStatus(), nullValue());
+        }
+        if (serverTestInstance.getSource() != null) {
+            assertThat(clientInstance.getSourceAsMap(), equalTo(serverTestInstance.getSource().getAsMap()));
+        } else {
+            assertThat(clientInstance.getSource(), nullValue());
+        }
+    }
+
+    @Override
+    protected ToXContent.Params getParams() {
+        return new ToXContent.MapParams(Collections.singletonMap("hide_headers", "false"));
+    }
+
     private static BytesReference simpleWatch() {
         try {
             XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
@@ -181,58 +153,45 @@ public class GetWatchResponseTests extends
         }
     }
 
-    @Override
-    public org.elasticsearch.client.watcher.GetWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException {
-        return org.elasticsearch.client.watcher.GetWatchResponse.fromXContent(parser);
-    }
-
-    @Override
-    public GetWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.GetWatchResponse instance) {
-        if (instance.isFound()) {
-            return new GetWatchResponse(instance.getId(), instance.getVersion(), instance.getSeqNo(), instance.getPrimaryTerm(),
-                convertHlrcToInternal(instance.getStatus()), new XContentSource(instance.getSource(), instance.getContentType()));
-        } else {
-            return new GetWatchResponse(instance.getId());
-        }
-    }
-
-    private static WatchStatus convertHlrcToInternal(org.elasticsearch.client.watcher.WatchStatus status) {
+    private static WatchStatus convertWatchStatus(org.elasticsearch.client.watcher.WatchStatus status) {
         final Map<String, ActionStatus> actions = new HashMap<>();
         for (Map.Entry<String, org.elasticsearch.client.watcher.ActionStatus> entry : status.getActions().entrySet()) {
-            actions.put(entry.getKey(), convertHlrcToInternal(entry.getValue()));
+            actions.put(entry.getKey(), convertActionStatus(entry.getValue()));
        }
        return new WatchStatus(status.version(),
-            convertHlrcToInternal(status.state()),
-            status.getExecutionState() == null ? null : convertHlrcToInternal(status.getExecutionState()),
+            convertWatchStatusState(status.state()),
+            status.getExecutionState() == null ? null : convertWatchStatus(status.getExecutionState()),
            status.lastChecked(), status.lastMetCondition(), actions, status.getHeaders()
        );
    }
 
-    private static ActionStatus convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus actionStatus) {
-        return new ActionStatus(convertHlrcToInternal(actionStatus.ackStatus()),
-            actionStatus.lastExecution() == null ? null : convertHlrcToInternal(actionStatus.lastExecution()),
-            actionStatus.lastSuccessfulExecution() == null ? null : convertHlrcToInternal(actionStatus.lastSuccessfulExecution()),
-            actionStatus.lastThrottle() == null ? null : convertHlrcToInternal(actionStatus.lastThrottle())
+    private static ActionStatus convertActionStatus(org.elasticsearch.client.watcher.ActionStatus actionStatus) {
+        return new ActionStatus(convertAckStatus(actionStatus.ackStatus()),
+            actionStatus.lastExecution() == null ? null : convertActionStatusExecution(actionStatus.lastExecution()),
+            actionStatus.lastSuccessfulExecution() == null ? null : convertActionStatusExecution(actionStatus.lastSuccessfulExecution()),
+            actionStatus.lastThrottle() == null ? null : convertActionStatusThrottle(actionStatus.lastThrottle())
        );
    }
 
-    private static ActionStatus.AckStatus convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.AckStatus ackStatus) {
-        return new ActionStatus.AckStatus(ackStatus.timestamp(), convertHlrcToInternal(ackStatus.state()));
+    private static ActionStatus.AckStatus convertAckStatus(org.elasticsearch.client.watcher.ActionStatus.AckStatus ackStatus) {
+        return new ActionStatus.AckStatus(ackStatus.timestamp(), convertAckStatusState(ackStatus.state()));
    }
 
-    private static ActionStatus.AckStatus.State convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.AckStatus.State state) {
+    private static ActionStatus.AckStatus.State convertAckStatusState(
+        org.elasticsearch.client.watcher.ActionStatus.AckStatus.State state) {
        return ActionStatus.AckStatus.State.valueOf(state.name());
    }
 
-    private static WatchStatus.State convertHlrcToInternal(org.elasticsearch.client.watcher.WatchStatus.State state) {
+    private static WatchStatus.State convertWatchStatusState(org.elasticsearch.client.watcher.WatchStatus.State state) {
        return new WatchStatus.State(state.isActive(), state.getTimestamp());
    }
 
-    private static ExecutionState convertHlrcToInternal(org.elasticsearch.client.watcher.ExecutionState executionState) {
+    private static ExecutionState convertWatchStatus(org.elasticsearch.client.watcher.ExecutionState executionState) {
        return ExecutionState.valueOf(executionState.name());
    }
 
-    private static ActionStatus.Execution convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.Execution execution) {
+    private static ActionStatus.Execution convertActionStatusExecution(
+        org.elasticsearch.client.watcher.ActionStatus.Execution execution) {
        if (execution.successful()) {
            return ActionStatus.Execution.successful(execution.timestamp());
        } else {
@@ -240,7 +199,7 @@ public class GetWatchResponseTests extends
        }
    }
 
-    private static ActionStatus.Throttle convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.Throttle throttle) {
+    private static ActionStatus.Throttle convertActionStatusThrottle(org.elasticsearch.client.watcher.ActionStatus.Throttle throttle) {
        return new ActionStatus.Throttle(throttle.timestamp(), throttle.reason());
    }
 
@@ -18,17 +18,19 @@
  */
 package org.elasticsearch.client.watcher.hlrc;
 
+import org.elasticsearch.client.AbstractResponseTestCase;
 import org.elasticsearch.client.watcher.DeleteWatchResponse;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.client.AbstractHlrcXContentTestCase;
 
 import java.io.IOException;
 
-public class DeleteWatchResponseTests extends AbstractHlrcXContentTestCase<
+import static org.hamcrest.Matchers.equalTo;
+
+public class DeleteWatchResponseTests extends AbstractResponseTestCase<
     org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse, DeleteWatchResponse> {
 
     @Override
-    protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createTestInstance() {
+    protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createServerTestInstance() {
         String id = randomAlphaOfLength(10);
         long version = randomLongBetween(1, 10);
         boolean found = randomBoolean();
@@ -36,23 +38,15 @@ public class DeleteWatchResponseTests extends AbstractHlrcXContentTestCase<
     }
 
     @Override
-    protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException {
-        return org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse.fromXContent(parser);
-    }
-
-    @Override
-    public DeleteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException {
+    protected DeleteWatchResponse doParseToClientInstance(XContentParser parser) throws IOException {
         return DeleteWatchResponse.fromXContent(parser);
     }
 
     @Override
-    public org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse convertHlrcToInternal(DeleteWatchResponse instance) {
-        return new org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse(instance.getId(), instance.getVersion(),
-            instance.isFound());
-    }
-
-    @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected void assertInstances(org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse serverTestInstance,
+                                   DeleteWatchResponse clientInstance) {
+        assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId()));
+        assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion()));
+        assertThat(clientInstance.isFound(), equalTo(serverTestInstance.isFound()));
     }
 }
@@ -19,31 +19,23 @@
 
 package org.elasticsearch.client.watcher.hlrc;
 
+import org.elasticsearch.client.AbstractResponseTestCase;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.client.AbstractHlrcXContentTestCase;
 import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse;
 
 import java.io.IOException;
 
-public class ExecuteWatchResponseTests
-    extends AbstractHlrcXContentTestCase<ExecuteWatchResponse, org.elasticsearch.client.watcher.ExecuteWatchResponse> {
+import static org.hamcrest.Matchers.equalTo;
+
+public class ExecuteWatchResponseTests extends AbstractResponseTestCase<
+    ExecuteWatchResponse, org.elasticsearch.client.watcher.ExecuteWatchResponse> {
 
     @Override
-    public org.elasticsearch.client.watcher.ExecuteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException {
-        return org.elasticsearch.client.watcher.ExecuteWatchResponse.fromXContent(parser);
-    }
-
-    @Override
-    public ExecuteWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.ExecuteWatchResponse instance) {
-        return new ExecuteWatchResponse(instance.getRecordId(), instance.getRecord(), XContentType.JSON);
-    }
-
-    @Override
-    protected ExecuteWatchResponse createTestInstance() {
+    protected ExecuteWatchResponse createServerTestInstance() {
         String id = "my_watch_0-2015-06-02T23:17:55.124Z";
         try {
             XContentBuilder builder = XContentFactory.jsonBuilder();
@@ -66,12 +58,14 @@ public class ExecuteWatchResponseTests
     }
 
     @Override
-    protected ExecuteWatchResponse doParseInstance(XContentParser parser) throws IOException {
-        return ExecuteWatchResponse.fromXContent(parser);
+    protected org.elasticsearch.client.watcher.ExecuteWatchResponse doParseToClientInstance(XContentParser parser) throws IOException {
+        return org.elasticsearch.client.watcher.ExecuteWatchResponse.fromXContent(parser);
     }
 
     @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected void assertInstances(ExecuteWatchResponse serverTestInstance,
+                                   org.elasticsearch.client.watcher.ExecuteWatchResponse clientInstance) {
+        assertThat(clientInstance.getRecordId(), equalTo(serverTestInstance.getRecordId()));
+        assertThat(clientInstance.getRecordAsMap(), equalTo(serverTestInstance.getRecordSource().getAsMap()));
     }
 }
@@ -18,17 +18,19 @@
  */
 package org.elasticsearch.client.watcher.hlrc;
 
+import org.elasticsearch.client.AbstractResponseTestCase;
 import org.elasticsearch.client.watcher.PutWatchResponse;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.client.AbstractHlrcXContentTestCase;
 
 import java.io.IOException;
 
-public class PutWatchResponseTests extends AbstractHlrcXContentTestCase<
+import static org.hamcrest.Matchers.equalTo;
+
+public class PutWatchResponseTests extends AbstractResponseTestCase<
     org.elasticsearch.protocol.xpack.watcher.PutWatchResponse, PutWatchResponse> {
 
     @Override
-    protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createTestInstance() {
+    protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createServerTestInstance() {
         String id = randomAlphaOfLength(10);
         long seqNo = randomNonNegativeLong();
         long primaryTerm = randomLongBetween(1, 20);
@@ -38,23 +40,17 @@ public class PutWatchResponseTests extends AbstractHlrcXContentTestCase<
     }
 
     @Override
-    protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse doParseInstance(XContentParser parser) throws IOException {
-        return org.elasticsearch.protocol.xpack.watcher.PutWatchResponse.fromXContent(parser);
+    protected PutWatchResponse doParseToClientInstance(XContentParser parser) throws IOException {
+        return PutWatchResponse.fromXContent(parser);
     }
 
     @Override
-    public PutWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException {
-        return org.elasticsearch.client.watcher.PutWatchResponse.fromXContent(parser);
-    }
-
-    @Override
-    public org.elasticsearch.protocol.xpack.watcher.PutWatchResponse convertHlrcToInternal(PutWatchResponse instance) {
-        return new org.elasticsearch.protocol.xpack.watcher.PutWatchResponse(instance.getId(), instance.getVersion(),
-            instance.getSeqNo(), instance.getPrimaryTerm(), instance.isCreated());
-    }
-
-    @Override
-    protected boolean supportsUnknownFields() {
-        return false;
+    protected void assertInstances(org.elasticsearch.protocol.xpack.watcher.PutWatchResponse serverTestInstance,
+                                   PutWatchResponse clientInstance) {
+        assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId()));
+        assertThat(clientInstance.getSeqNo(), equalTo(serverTestInstance.getSeqNo()));
+        assertThat(clientInstance.getPrimaryTerm(), equalTo(serverTestInstance.getPrimaryTerm()));
+        assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion()));
+        assertThat(clientInstance.isCreated(), equalTo(serverTestInstance.isCreated()));
    }
 }

@@ -117,7 +117,7 @@ task buildTransportModules

void copyModule(Sync copyTask, Project module) {
copyTask.configure {
dependsOn { module.bundlePlugin }
dependsOn "${module.path}:bundlePlugin"
from({ zipTree(module.bundlePlugin.outputs.files.singleFile) }) {
includeEmptyDirs false

@@ -167,6 +167,7 @@ buildDefaultLog4jConfig.doLast(writeLog4jProperties)

// copy log4j2.properties from modules that have it
void copyLog4jProperties(Task buildTask, Project module) {
buildTask.dependsOn "${module.path}:bundlePlugin"
buildTask.doFirst {
FileTree tree = zipTree(module.bundlePlugin.outputs.files.singleFile)
FileTree filtered = tree.matching {

@@ -4,8 +4,9 @@
A token filter which removes elisions. For example, "l'avion" (the
plane) will be tokenized as "avion" (plane).

Accepts `articles` parameter which is a set of stop words articles. Also accepts
`articles_case`, which indicates whether the filter treats those articles as
Requires either an `articles` parameter which is a set of stop word articles, or
`articles_path` which points to a text file containing the stop set. Also optionally
accepts `articles_case`, which indicates whether the filter treats those articles as
case sensitive.

For example:

@@ -43,6 +43,8 @@ Additional settings are:
* `expand` (defaults to `true`).
* `lenient` (defaults to `false`). If `true`, ignores exceptions while parsing the synonym configuration. It is important
to note that only those synonym rules which cannot get parsed are ignored. For instance, consider the following request:

[source,js]
--------------------------------------------------

@@ -22,7 +22,7 @@ Deletes an existing {dataframe-transform}.
[[delete-data-frame-transform-prereqs]]
==== {api-prereq-title}

If the {es} {security-features} are enabled, you must have
* If the {es} {security-features} are enabled, you must have
`manage_data_frame_transforms` cluster privileges to use this API. The built-in
`data_frame_transforms_admin` role has these privileges. For more information,
see {stack-ov}/security-privileges.html[Security privileges] and

@@ -43,7 +43,7 @@ NOTE: Before you can delete the {dataframe-transform}, you must stop it.

[discrete]
[[delete-data-frame-transform-examples]]
==== {api-example-title}
==== {api-examples-title}

[source,js]
--------------------------------------------------

@@ -31,15 +31,21 @@ Retrieves usage information for {dataframe-transforms}.
[[get-data-frame-transform-stats-prereqs]]
==== {api-prereq-title}

If the {es} {security-features} are enabled, you must have
* If the {es} {security-features} are enabled, you must have
`monitor_data_frame_transforms` cluster privileges to use this API. The built-in
`data_frame_transforms_user` role has these privileges. For more information,
see {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

//[discrete]
//[[get-data-frame-transform-stats-desc]]
//===== {api-description-title}
[discrete]
[[get-data-frame-transform-stats-desc]]
==== {api-description-title}

You can get statistics for multiple {dataframe-transforms} in a single API
request by using a comma-separated list of identifiers or a wildcard expression.
You can get statistics for all {dataframe-transforms} by using `_all`, by
specifying `*` as the `<data_frame_transform_id>`, or by omitting the
`<data_frame_transform_id>`.

[discrete]
[[get-data-frame-transform-stats-path-parms]]

@@ -56,17 +62,26 @@ see {stack-ov}/security-privileges.html[Security privileges] and
==== {api-query-parms-title}

`allow_no_match` (Optional)::
(boolean) Whether to ignore if a wildcard expression matches no
{dataframe-transforms}. This includes `_all` string or when no transforms have
been specified. The default is `true`.
(boolean) Specifies what to do when the request:
+
--
* Contains wildcard expressions and there are no {dataframe-transforms} that match.
* Contains the `_all` string or no identifiers and there are no matches.
* Contains wildcard expressions and there are only partial matches.

The default value is `true`, which returns an empty `transforms` array when
there are no matches and the subset of results when there are partial matches.
If this parameter is `false`, the request returns a `404` status code when there
are no matches or only partial matches (see the example request after this
parameter list).
--

`from` (Optional)::
(integer) Skips the specified number of {dataframe-transforms}. The
default value is `0`.

`size` (Optional)::
(integer) Specifies the maximum number of {dataframe-transforms} to obtain.
The default value is `100`.
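
For example, a request along these lines pages through statistics and fails
fast instead of returning an empty result. This is a sketch, not part of the
original commit; the `_data_frame/transforms` path and the `ecommerce*`
wildcard are assumed for illustration:

[source,js]
--------------------------------------------------
GET _data_frame/transforms/ecommerce*/_stats?allow_no_match=false&from=0&size=10
--------------------------------------------------

If nothing matches `ecommerce*`, this request returns a `404` rather than an
empty `transforms` array.
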
[discrete]
[[get-data-frame-transform-stats-response]]

@@ -75,6 +90,13 @@ see {stack-ov}/security-privileges.html[Security privileges] and
`transforms`::
(array) An array of statistics objects for {dataframe-transforms}, which are
sorted by the `id` value in ascending order.

[[get-data-frame-transform-stats-response-codes]]
==== {api-response-codes-title}

`404` (Missing resources)::
If `allow_no_match` is `false`, this code indicates that there are no
resources that match the request or only partial matches for the request.

[discrete]
[[get-data-frame-transform-stats-example]]

@@ -30,12 +30,22 @@ Retrieves configuration information for {dataframe-transforms}.
[[get-data-frame-transform-prereqs]]
==== {api-prereq-title}

If the {es} {security-features} are enabled, you must have
* If the {es} {security-features} are enabled, you must have
`monitor_data_frame_transforms` cluster privileges to use this API. The built-in
`data_frame_transforms_user` role has these privileges. For more information,
see {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[discrete]
[[get-data-frame-transform-desc]]
==== {api-description-title}

You can get information for multiple {dataframe-transforms} in a single API
request by using a comma-separated list of identifiers or a wildcard expression.
You can get information for all {dataframe-transforms} by using `_all`, by
specifying `*` as the `<data_frame_transform_id>`, or by omitting the
`<data_frame_transform_id>`.

[discrete]
[[get-data-frame-transform-path-parms]]
==== {api-path-parms-title}

@@ -51,17 +61,26 @@ see {stack-ov}/security-privileges.html[Security privileges] and
==== {api-query-parms-title}

`allow_no_match` (Optional)::
(boolean) Whether to ignore if a wildcard expression matches no
{dataframe-transforms}. This includes `_all` string or when no transforms have
been specified. The default is `true`.
(boolean) Specifies what to do when the request:
+
--
* Contains wildcard expressions and there are no {dataframe-transforms} that match.
* Contains the `_all` string or no identifiers and there are no matches.
* Contains wildcard expressions and there are only partial matches.

The default value is `true`, which returns an empty `transforms` array when
there are no matches and the subset of results when there are partial matches.
If this parameter is `false`, the request returns a `404` status code when there
are no matches or only partial matches (see the example request after this
parameter list).
--

`from` (Optional)::
(integer) Skips the specified number of {dataframe-transforms}. The
default value is `0`.

`size` (Optional)::
(integer) Specifies the maximum number of {dataframe-transforms} to obtain.
The default value is `100`.
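
As a sketch of how identifiers combine, a single request can mix a
comma-separated list with a wildcard. The transform names and the
`_data_frame/transforms` path below are illustrative assumptions, not values
from this commit:

[source,js]
--------------------------------------------------
GET _data_frame/transforms/ecommerce-transform,sales*?size=10
--------------------------------------------------
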
[discrete]
[[get-data-frame-transform-response]]

@@ -70,10 +89,17 @@ see {stack-ov}/security-privileges.html[Security privileges] and
`transforms`::
(array) An array of transform resources, which are sorted by the `id` value in
ascending order.

[[get-data-frame-transform-response-codes]]
==== {api-response-codes-title}

`404` (Missing resources)::
If `allow_no_match` is `false`, this code indicates that there are no
resources that match the request or only partial matches for the request.

[discrete]
[[get-data-frame-transform-example]]
==== {api-example-title}
==== {api-examples-title}

The following example retrieves information about a maximum of ten transforms:

@@ -22,7 +22,7 @@ Previews a {dataframe-transform}.
[[preview-data-frame-transform-prereq]]
==== {api-prereq-title}

If the {es} {security-features} are enabled, you must have
* If the {es} {security-features} are enabled, you must have
`manage_data_frame_transforms` cluster privileges to use this API. The built-in
`data_frame_transforms_admin` role has these privileges. You must also have
`read` and `view_index_metadata` privileges on the source index for the

@@ -42,7 +42,7 @@ If the {es} {security-features} are enabled, you must have
reduce the data. See <<data-frame-transform-pivot>>.

[discrete]
==== {api-example-title}
==== {api-examples-title}

[source,js]
--------------------------------------------------

@@ -22,7 +22,7 @@ Instantiates a {dataframe-transform}.
[[put-data-frame-transform-prereqs]]
==== {api-prereq-title}

If the {es} {security-features} are enabled, you must have
* If the {es} {security-features} are enabled, you must have
`manage_data_frame_transforms` cluster privileges to use this API. The built-in
`data_frame_transforms_admin` role has these privileges. You must also
have `read` and `view_index_metadata` privileges on the source index and `read`,

@@ -30,10 +30,9 @@ have `read` and `view_index_metadata` privileges on the source index and `read`,
information, see {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[discrete]
[[put-data-frame-transform-desc]]
===== {api-description-title}
==== {api-description-title}

IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}.
Do not put a {dataframe-transform} directly into any

@@ -71,7 +70,7 @@ IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}.

[discrete]
[[put-data-frame-transform-example]]
==== {api-example-title}
==== {api-examples-title}

[source,js]
--------------------------------------------------

@@ -22,7 +22,7 @@ Starts one or more {dataframe-transforms}.
[[start-data-frame-transform-prereqs]]
==== {api-prereq-title}

If the {es} {security-features} are enabled, you must have
* If the {es} {security-features} are enabled, you must have
`manage_data_frame_transforms` cluster privileges to use this API. You must also
have `view_index_metadata` privileges on the source index for the
{dataframe-transform}. For more information, see

@@ -40,7 +40,7 @@ have `view_index_metadata` privileges on the source index for the

[discrete]
[[start-data-frame-transform-example]]
==== {api-example-title}
==== {api-examples-title}

[source,js]
--------------------------------------------------

@@ -26,7 +26,7 @@ Stops one or more {dataframe-transforms}.
[[stop-data-frame-transform-prereq]]
==== {api-prereq-title}

If the {es} {security-features} are enabled, you must have
* If the {es} {security-features} are enabled, you must have
`manage_data_frame_transforms` cluster privileges to use this API. The built-in
`data_frame_transforms_admin` role has these privileges. For more information,
see {stack-ov}/security-privileges.html[Security privileges] and

@@ -55,9 +55,23 @@ All {dataframe-transforms} can be stopped by using `_all` or `*` as the
==== {api-query-parms-title}

`allow_no_match` (Optional)::
(boolean) Whether to ignore if a wildcard expression matches no
{dataframe-transforms}. This includes `_all` string or when no transforms have
been specified. The default is `true`.
(boolean) Specifies what to do when the request:
+
--
* Contains wildcard expressions and there are no {dataframe-transforms} that match.
* Contains the `_all` string or no identifiers and there are no matches.
* Contains wildcard expressions and there are only partial matches.

The default value is `true`, which returns a successful acknowledgement message
when there are no matches. When there are only partial matches, the API stops
the appropriate {dataframe-transforms}. For example, if the request contains
`test-id1*,test-id2*` as the identifiers and there are no {dataframe-transforms}
that match `test-id2*`, the API nonetheless stops the {dataframe-transforms}
that match `test-id1*`.

If this parameter is `false`, the request returns a `404` status code when there
are no matches or only partial matches.
--

`timeout` (Optional)::
(time value) If `wait_for_completion=true`, the API blocks for (at maximum)

@@ -72,9 +86,17 @@ All {dataframe-transforms} can be stopped by using `_all` or `*` as the
completely stops. If set to `false`, the API returns immediately and the
indexer will be stopped asynchronously in the background. Defaults to `false`.

[discrete]
[[stop-data-frame-transform-response-codes]]
==== {api-response-codes-title}

`404` (Missing resources)::
If `allow_no_match` is `false`, this code indicates that there are no
resources that match the request or only partial matches for the request.

[discrete]
[[stop-data-frame-transform-example]]
==== {api-example-title}
==== {api-examples-title}

[source,js]
--------------------------------------------------

@@ -0,0 +1,72 @@
[role="xpack"]
[testenv="basic"]
[[indices-reload-analyzers]]
== Reload Search Analyzers

experimental[]

Reloads search analyzers and their resources.

Synonym filters (both `synonym` and `synonym_graph`) can be declared as
updateable if they are only used in <<search-analyzer,search analyzers>>
with the `updateable` flag:

[source,js]
--------------------------------------------------
PUT /my_index
{
  "settings": {
    "index" : {
      "analysis" : {
        "analyzer" : {
          "my_synonyms" : {
            "tokenizer" : "whitespace",
            "filter" : ["synonym"]
          }
        },
        "filter" : {
          "synonym" : {
            "type" : "synonym",
            "synonyms_path" : "analysis/synonym.txt",
            "updateable" : true <1>
          }
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "text": {
        "type": "text",
        "analyzer" : "standard",
        "search_analyzer": "my_synonyms" <2>
      }
    }
  }
}
--------------------------------------------------
// CONSOLE

<1> Mark the synonym filter as updateable.
<2> Synonym analyzer is usable as a search_analyzer.

NOTE: Trying to use the above analyzer as an index analyzer will result in an error.

Using the <<indices-reload-analyzers,analyzer reload API>>, you can trigger reloading of the
synonym definition. The contents of the configured synonyms file will be reloaded and the
synonyms definition the filter uses will be updated.

The `_reload_search_analyzers` API can be run on one or more indices and will trigger
reloading of the synonyms from the configured file.

NOTE: Reloading will happen on every node that holds shards of the index, so it is important
to update the synonym file contents on every data node (even the ones that don't currently
hold shard copies; shards might be relocated there in the future) before calling
reload to ensure the new state of the file is reflected everywhere in the cluster.

[source,js]
--------------------------------------------------
POST /my_index/_reload_search_analyzers
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_index\n/]

@@ -89,8 +89,8 @@ GET my_index/_mapping <3>
<2> The document can be retrieved.
<3> Checking the mapping reveals that no fields have been added.

TIP: The `enabled` setting can be updated on existing fields
using the <<indices-put-mapping,PUT mapping API>>.
The `enabled` setting for existing fields and the top-level mapping
definition cannot be updated.

Note that because Elasticsearch completely skips parsing the field
contents, it is possible to add non-object data to a disabled field:

@@ -30,7 +30,7 @@ PUT my_index/_doc/2 <3>
  "message": "Syntax error with some long stacktrace"
}

GET _search <4>
GET my_index/_search <4>
{
  "aggs": {
    "messages": {

@@ -12,8 +12,8 @@ A job can be opened and closed multiple times throughout its lifecycle.
A closed job cannot receive data or perform analysis
operations, but you can still explore and navigate results.

==== Request
[[ml-close-job-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/<job_id>/_close` +

@@ -21,8 +21,15 @@ operations, but you can still explore and navigate results.

`POST _ml/anomaly_detectors/_all/_close` +

[[ml-close-job-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-close-job-desc]]
==== {api-description-title}

You can close multiple jobs in a single API request by using a group name, a
comma-separated list of jobs, or a wildcard expression. You can close all jobs

@@ -47,32 +54,26 @@ after the close job API returns. The `force` query parameter should only be used in
situations where the job has already failed, or where you are not interested in
results the job might have recently produced or might produce in the future.

[[ml-close-job-path-parms]]
==== {api-path-parms-title}

==== Path Parameters

`job_id`::
`<job_id>` (Required)::
(string) Identifier for the job. It can be a job identifier, a group name, or
a wildcard expression.

[[ml-close-job-query-parms]]
==== {api-query-parms-title}

==== Query Parameters

`force`::
`force` (Optional)::
(boolean) Use to close a failed job, or to forcefully close a job which has not
responded to its initial close request.

`timeout`::
`timeout` (Optional)::
(time units) Controls the time to wait until a job has closed.
The default value is 30 minutes.
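
As an illustrative sketch of these two parameters combined (not part of the
original commit), the following force closes the job used in the example below
and waits at most ten minutes:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_close?force=true&timeout=10m
--------------------------------------------------
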
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-close-job-example]]
==== {api-examples-title}

The following example closes the `total-requests` job:

@@ -8,34 +8,37 @@

Deletes scheduled events from a calendar.

==== Request
[[ml-delete-calendar-event-request]]
==== {api-request-title}

`DELETE _ml/calendars/<calendar_id>/events/<event_id>`

[[ml-delete-calendar-event-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-delete-calendar-event-desc]]
==== {api-description-title}

This API removes individual events from a calendar. To remove all scheduled
events and delete the calendar, see the
<<ml-delete-calendar,delete calendar API>>.

==== Path Parameters
[[ml-delete-calendar-event-path-parms]]
==== {api-path-parms-title}

`calendar_id`(required)::
`<calendar_id>` (Required)::
(string) Identifier for the calendar.

`event_id` (required)::
`<event_id>` (Required)::
(string) Identifier for the scheduled event. You can obtain this identifier
by using the <<ml-get-calendar-event,get calendar events API>>.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-delete-calendar-event-example]]
==== {api-examples-title}

The following example deletes a scheduled event from the `planned-outages`
calendar:

@@ -8,28 +8,30 @@

Deletes jobs from a calendar.

==== Request
[[ml-delete-calendar-job-request]]
==== {api-request-title}

`DELETE _ml/calendars/<calendar_id>/jobs/<job_id>`

[[ml-delete-calendar-job-prereqs]]
==== {api-prereq-title}

==== Path Parameters
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

`calendar_id`(required)::
[[ml-delete-calendar-job-path-parms]]
==== {api-path-parms-title}

`<calendar_id>` (Required)::
(string) Identifier for the calendar.

`job_id` (required)::
(string) An identifier for the job. It can be a job identifier, a group name, or a
comma-separated list of jobs or groups.
`<job_id>` (Required)::
(string) An identifier for the job. It can be a job identifier, a group name,
or a comma-separated list of jobs or groups.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-delete-calendar-job-example]]
==== {api-examples-title}

The following example removes the association between the `planned-outages`
calendar and `total-requests` job:

@@ -8,31 +8,32 @@

Deletes a calendar.

==== Request
[[ml-delete-calendar-request]]
==== {api-request-title}

`DELETE _ml/calendars/<calendar_id>`

[[ml-delete-calendar-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-delete-calendar-desc]]
==== {api-description-title}

This API removes all scheduled events from the calendar then deletes the
calendar.

[[ml-delete-calendar-path-parms]]
==== {api-path-parms-title}

==== Path Parameters

`calendar_id` (required)::
`<calendar_id>` (Required)::
(string) Identifier for the calendar.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-delete-calendar-example]]
==== {api-examples-title}

The following example deletes the `planned-outages` calendar:

@@ -10,38 +10,39 @@

Deletes an existing {dfeed}.

==== Request
[[ml-delete-datafeed-request]]
==== {api-request-title}

`DELETE _ml/datafeeds/<feed_id>`

[[ml-delete-datafeed-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

NOTE: Unless the `force` parameter is used, the {dfeed} must be stopped before it can be deleted.
[[ml-delete-datafeed-desc]]
==== {api-description-title}

NOTE: Unless you use the `force` parameter, you must stop the {dfeed} before you
can delete it.

==== Path Parameters
[[ml-delete-datafeed-path-parms]]
==== {api-path-parms-title}

`feed_id` (required)::
(string) Identifier for the {dfeed}
`<feed_id>` (Required)::
(string) Identifier for the {dfeed}.

[[ml-delete-datafeed-query-parms]]
==== {api-query-parms-title}

===== Query Parameters
`force` (Optional)::
(boolean) Use to forcefully delete a started {dfeed}; this method is quicker
than stopping and deleting the {dfeed}.

`force`::
(boolean) Use to forcefully delete a started {dfeed}; this method is quicker than
stopping and deleting the {dfeed}.
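
For example, a sketch of a force delete, using the {dfeed} name from the
example below:

[source,js]
--------------------------------------------------
DELETE _ml/datafeeds/datafeed-total-requests?force=true
--------------------------------------------------
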
===== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

==== Examples
[[ml-delete-datafeed-example]]
==== {api-examples-title}

The following example deletes the `datafeed-total-requests` {dfeed}:

@@ -8,25 +8,27 @@

Deletes expired and unused machine learning data.

==== Request
[[ml-delete-expired-data-request]]
==== {api-request-title}

`DELETE _ml/_delete_expired_data`

==== Description
[[ml-delete-expired-data-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-delete-expired-data-desc]]
==== {api-description-title}

Deletes all job results, model snapshots and forecast data that have exceeded
their `retention days` period. Machine learning state documents that are not
associated with any job are also deleted.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{stack-ov}/security-privileges.html[Security Privileges] and
{stack-ov}/built-in-roles.html[Built-in Roles].

==== Examples
[[ml-delete-expired-data-example]]
==== {api-examples-title}

The endpoint takes no arguments:

@@ -8,32 +8,33 @@

Deletes a filter.

==== Request
[[ml-delete-filter-request]]
==== {api-request-title}

`DELETE _ml/filters/<filter_id>`

[[ml-delete-filter-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-delete-filter-desc]]
==== {api-description-title}

This API deletes a {stack-ov}/ml-rules.html[filter].
If a {ml} job references the filter, you cannot delete the filter. You must
update or delete the job before you can delete the filter.

[[ml-delete-filter-path-parms]]
==== {api-path-parms-title}

==== Path Parameters

`filter_id` (required)::
`<filter_id>` (Required)::
(string) Identifier for the filter.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-delete-filter-example]]
==== {api-examples-title}

The following example deletes the `safe_domains` filter:

@@ -8,7 +8,8 @@

Deletes forecasts from a {ml} job.

==== Request
[[ml-delete-forecast-request]]
==== {api-request-title}

`DELETE _ml/anomaly_detectors/<job_id>/_forecast` +

@@ -16,48 +17,54 @@ Deletes forecasts from a {ml} job.

`DELETE _ml/anomaly_detectors/<job_id>/_forecast/_all`

[[ml-delete-forecast-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-delete-forecast-desc]]
==== {api-description-title}

By default, forecasts are retained for 14 days. You can specify a different
retention period with the `expires_in` parameter in the <<ml-forecast,forecast jobs API>>. The delete forecast API enables you to delete one or more forecasts before they expire.
retention period with the `expires_in` parameter in the
<<ml-forecast,forecast jobs API>>. The delete forecast API enables you to delete
one or more forecasts before they expire.

NOTE: When you delete a job its associated forecasts are deleted.
NOTE: When you delete a job, its associated forecasts are deleted.

For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future].
For more information, see
{stack-ov}/ml-overview.html#ml-forecasting[Forecasting the future].

[[ml-delete-forecast-path-parms]]
==== {api-path-parms-title}

==== Path Parameters

`job_id` (required)::
`<job_id>` (Required)::
(string) Identifier for the job.

`forecast_id`::
`forecast_id` (Optional)::
(string) A comma-separated list of forecast identifiers.
If you do not specify this optional parameter or if you specify `_all`, the
API deletes all forecasts from the job.

==== Request Parameters
[[ml-delete-forecast-query-parms]]
==== {api-query-parms-title}

`allow_no_forecasts`::
`allow_no_forecasts` (Optional)::
(boolean) Specifies whether an error occurs when there are no forecasts. In
particular, if this parameter is set to `false` and there are no forecasts
associated with the job, attempts to delete all forecasts return an error.
The default value is `true`.

`timeout`::
`timeout` (Optional)::
(time units) Specifies the period of time to wait for the completion of the
delete operation. When this period of time elapses, the API fails and returns
an error. The default value is `30s`. For more information about time units,
see <<time-units>>.
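
As a sketch of these parameters combined, the following request deletes two
specific forecasts and fails if they do not exist. The forecast identifiers
are hypothetical placeholders, not values from this document:

[source,js]
--------------------------------------------------
DELETE _ml/anomaly_detectors/total-requests/_forecast/forecast-one,forecast-two?allow_no_forecasts=false&timeout=60s
--------------------------------------------------
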
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].

==== Examples
[[ml-delete-forecast-example]]
==== {api-examples-title}

The following example deletes all forecasts from the `total-requests` job:

@@ -8,13 +8,20 @@

Deletes an existing anomaly detection job.

==== Request
[[ml-delete-job-request]]
==== {api-request-title}

`DELETE _ml/anomaly_detectors/<job_id>`

[[ml-delete-job-prereqs]]
==== {api-prereq-title}

==== Description
* If {es} {security-features} are enabled, you must have `manage_ml` or `manage`
cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-delete-job-desc]]
==== {api-description-title}

All job configuration, model state and results are deleted.

@@ -30,29 +37,25 @@ is used the job must be closed before it can be deleted.
It is not currently possible to delete multiple jobs using wildcards or a comma
separated list.

==== Path Parameters
[[ml-delete-job-path-parms]]
==== {api-path-parms-title}

`job_id` (required)::
(string) Identifier for the job
`<job_id>` (Required)::
(string) Identifier for the job.

===== Query Parameters
[[ml-delete-job-query-parms]]
==== {api-query-parms-title}

`force`::
`force` (Optional)::
(boolean) Use to forcefully delete an opened job; this method is quicker than
closing and deleting the job.

`wait_for_completion`::
`wait_for_completion` (Optional)::
(boolean) Specifies whether the request should return immediately or wait
until the job deletion completes. Defaults to `true`.
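
For example, a sketch of a force delete that returns without waiting for the
deletion to complete, using the job name from the example below:

[source,js]
--------------------------------------------------
DELETE _ml/anomaly_detectors/total-requests?force=true&wait_for_completion=false
--------------------------------------------------
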
==== Authorization

If {es} {security-features} are enabled, you must have `manage_ml`, or `manage`
cluster privileges to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].

==== Examples
[[ml-delete-job-example]]
==== {api-examples-title}

The following example deletes the `total-requests` job:

@@ -8,34 +8,36 @@

Deletes an existing model snapshot.

==== Request
[[ml-delete-snapshot-request]]
==== {api-request-title}

`DELETE _ml/anomaly_detectors/<job_id>/model_snapshots/<snapshot_id>`

[[ml-delete-snapshot-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-delete-snapshot-desc]]
==== {api-description-title}

IMPORTANT: You cannot delete the active model snapshot. To delete that snapshot,
first revert to a different one. To identify the active model snapshot, refer to
the `model_snapshot_id` in the results from the get jobs API.

==== Path Parameters
[[ml-delete-snapshot-path-parms]]
==== {api-path-parms-title}

`job_id` (required)::
(string) Identifier for the job
`<job_id>` (Required)::
(string) Identifier for the job.

`snapshot_id` (required)::
(string) Identifier for the model snapshot
`<snapshot_id>` (Required)::
(string) Identifier for the model snapshot.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-delete-snapshot-example]]
==== {api-examples-title}

The following example deletes the `1491948163` snapshot:

@@ -11,12 +11,20 @@ experimental[]
Finds the structure of a text file. The text file must contain data that is
suitable to be ingested into {es}.

==== Request
[[ml-find-file-structure-request]]
==== {api-request-title}

`POST _ml/find_file_structure`

[[ml-find-file-structure-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `monitor_ml` or
`monitor` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-find-file-structure-desc]]
==== {api-description-title}

This API provides a starting point for ingesting data into {es} in a format that
is suitable for subsequent use with other {ml} functionality.

@@ -47,38 +55,39 @@ specify the `explain` query parameter. It causes an `explanation` to appear in
the response, which should help in determining why the returned structure was
chosen.

==== Query Parameters
[[ml-find-file-structure-query-parms]]
==== {api-query-parms-title}

`charset`::
`charset` (Optional)::
(string) The file's character set. It must be a character set that is supported
by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or
`EUC-JP`. If this parameter is not specified, the structure finder chooses an
appropriate character set.

`column_names`::
`column_names` (Optional)::
(string) If you have set `format` to `delimited`, you can specify the column names
in a comma-separated list. If this parameter is not specified, the structure
finder uses the column names from the header row of the file. If the file does
not have a header row, columns are named "column1", "column2", "column3", etc.

`delimiter`::
`delimiter` (Optional)::
(string) If you have set `format` to `delimited`, you can specify the character used
to delimit the values in each row. Only a single character is supported; the
delimiter cannot have multiple characters. If this parameter is not specified,
the structure finder considers the following possibilities: comma, tab,
semi-colon, and pipe (`|`).

`explain`::
`explain` (Optional)::
(boolean) If this parameter is set to `true`, the response includes a field
named `explanation`, which is an array of strings that indicate how the
structure finder produced its result. The default value is `false`.

`format`::
`format` (Optional)::
(string) The high level structure of the file. Valid values are `ndjson`, `xml`,
`delimited`, and `semi_structured_text`. If this parameter is not specified,
the structure finder chooses one.

`grok_pattern`::
`grok_pattern` (Optional)::
(string) If you have set `format` to `semi_structured_text`, you can specify a Grok
pattern that is used to extract fields from every message in the file. The
name of the timestamp field in the Grok pattern must match what is specified

@@ -86,20 +95,20 @@ chosen.
name of the timestamp field in the Grok pattern must match "timestamp". If
`grok_pattern` is not specified, the structure finder creates a Grok pattern.

`has_header_row`::
`has_header_row` (Optional)::
(boolean) If you have set `format` to `delimited`, you can use this parameter to
indicate whether the column names are in the first row of the file. If this
parameter is not specified, the structure finder guesses based on the similarity of
the first row of the file to other rows.

`line_merge_size_limit`::
`line_merge_size_limit` (Optional)::
(unsigned integer) The maximum number of characters in a message when lines are
merged to form messages while analyzing semi-structured files. The default
is 10000. If you have extremely long messages you may need to increase this, but
be aware that this may lead to very long processing times if the way to group
lines into messages is misdetected.

`lines_to_sample`::
`lines_to_sample` (Optional)::
(unsigned integer) The number of lines to include in the structural analysis,
starting from the beginning of the file. The minimum is 2; the default
is 1000. If the value of this parameter is greater than the number of lines in

@@ -115,7 +124,7 @@ efficient to upload a sample file with more variety in the first 1000 lines than
to request analysis of 100000 lines to achieve some variety.
--

`quote`::
`quote` (Optional)::
(string) If you have set `format` to `delimited`, you can specify the character used
to quote the values in each row if they contain newlines or the delimiter
character. Only a single character is supported. If this parameter is not

@@ -123,18 +132,18 @@ to request analysis of 100000 lines to achieve some variety.
format does not use quoting, a workaround is to set this argument to a
character that does not appear anywhere in the sample.

`should_trim_fields`::
`should_trim_fields` (Optional)::
(boolean) If you have set `format` to `delimited`, you can specify whether values
between delimiters should have whitespace trimmed from them. If this parameter
is not specified and the delimiter is pipe (`|`), the default value is `true`.
Otherwise, the default value is `false`.

`timeout`::
`timeout` (Optional)::
(time) Sets the maximum amount of time that the structure analysis may take.
If the analysis is still running when the timeout expires then it will be
aborted. The default value is 25 seconds.

`timestamp_field`::
`timestamp_field` (Optional)::
(string) The name of the field that contains the primary timestamp of each
record in the file. In particular, if the file were ingested into an index,
this is the field that would be used to populate the `@timestamp` field. +

@@ -153,7 +162,7 @@ field (if any) is the primary timestamp field. For structured file formats, it
is not compulsory to have a timestamp in the file.
--

`timestamp_format`::
`timestamp_format` (Optional)::
(string) The Java time format of the timestamp field in the file. +
+
--

@@ -197,22 +206,16 @@ format from a built-in set.

--

==== Request Body
[[ml-find-file-structure-request-body]]
==== {api-request-body-title}

The text file that you want to analyze. It must contain data that is suitable to
be ingested into {es}. It does not need to be in JSON format and it does not
need to be UTF-8 encoded. The size is limited to the {es} HTTP receive buffer
size, which defaults to 100 Mb.
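
As a sketch of how the query parameters combine with this body, the two NDJSON
lines below are illustrative sample data in the spirit of the books example
that follows, not output from this commit:

[source,js]
--------------------------------------------------
POST _ml/find_file_structure?format=ndjson&lines_to_sample=500&explain=true
{"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02"}
{"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26"}
--------------------------------------------------
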
==== Authorization

You must have `monitor_ml`, or `monitor` cluster privileges to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].

[[ml-find-file-structure-examples]]
==== Examples
==== {api-examples-title}

Suppose you have a newline-delimited JSON file that contains information about
some books. You can send the contents to the `find_file_structure` endpoint:

@@ -8,13 +8,20 @@

Forces any buffered data to be processed by the job.

==== Request
[[ml-flush-job-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/<job_id>/_flush`

[[ml-flush-job-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-flush-job-desc]]
==== {api-description-title}

The flush jobs API is only applicable when sending data for analysis using the
<<ml-post-data,post data API>>. Depending on the content of the buffer, then it

@@ -26,44 +33,38 @@ remains open and is available to continue analyzing data. A close operation
additionally prunes and persists the model state to disk and the job must be
opened again before analyzing further data.

[[ml-flush-job-path-parms]]
==== {api-path-parms-title}

==== Path Parameters
`<job_id>` (Required)::
(string) Identifier for the job.

`job_id` (required)::
(string) Identifier for the job
[[ml-flush-job-query-parms]]
==== {api-query-parms-title}

==== Query Parameters

`advance_time`::
`advance_time` (Optional)::
(string) Specifies to advance to a particular time value. Results are
generated and the model is updated for data from the specified time interval.

`calc_interim`::
`calc_interim` (Optional)::
(boolean) If true, calculates the interim results for the most recent bucket
or all buckets within the latency period.

`end`::
`end` (Optional)::
(string) When used in conjunction with `calc_interim`, specifies the range
of buckets on which to calculate interim results.

`skip_time`::
`skip_time` (Optional)::
(string) Specifies to skip to a particular time value. Results are not
generated and the model is not updated for data from the specified time
interval.

`start`::
`start` (Optional)::
(string) When used in conjunction with `calc_interim`, specifies the range of
buckets on which to calculate interim results.
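
For example, a sketch that calculates interim results for a specific window.
The epoch-millisecond bounds are illustrative values, and the job name matches
the example below:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_flush?calc_interim=true&start=1403730000000&end=1403736000000
--------------------------------------------------
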
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-flush-job-example]]
==== {api-examples-title}

The following example flushes the `total-requests` job:

@@ -8,14 +8,22 @@

Predicts the future behavior of a time series by using its historical behavior.

==== Request
[[ml-forecast-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/<job_id>/_forecast`

[[ml-forecast-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

See {xpack-ref}/ml-overview.html#ml-forecasting[Forecasting the Future].
[[ml-forecast-desc]]
==== {api-description-title}

See {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the future].

[NOTE]
===============================

@@ -25,33 +33,29 @@ forecast. For more information about this property, see <<ml-job-resource>>.
* The job must be open when you create a forecast. Otherwise, an error occurs.
===============================

==== Path Parameters
[[ml-forecast-path-parms]]
==== {api-path-parms-title}

`job_id`::
`<job_id>` (Required)::
(string) Identifier for the job.

[[ml-forecast-request-body]]
==== {api-request-body-title}

==== Request Parameters

`duration`::
`duration` (Optional)::
(time units) A period of time that indicates how far into the future to
forecast. For example, `30d` corresponds to 30 days. The default value is 1
day. The forecast starts at the last record that was processed. For more
information about time units, see <<time-units>>.

`expires_in`::
`expires_in` (Optional)::
(time units) The period of time that forecast results are retained.
After a forecast expires, the results are deleted. The default value is 14 days.
If set to a value of `0`, the forecast is never automatically deleted.
For more information about time units, see <<time-units>>.
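
As a sketch of the request body, the following asks for a ten-day forecast
whose results are kept for 30 days; the retention value is illustrative:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_forecast
{
  "duration": "10d",
  "expires_in": "30d"
}
--------------------------------------------------
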
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].

==== Examples
[[ml-forecast-example]]
==== {api-examples-title}

The following example requests a 10 day forecast for the `total-requests` job:

@@ -8,64 +8,75 @@

Retrieves job results for one or more buckets.

==== Request
[[ml-get-bucket-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>/results/buckets` +

`GET _ml/anomaly_detectors/<job_id>/results/buckets/<timestamp>`

[[ml-get-bucket-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also
need `read` index privilege on the index that stores the results. The
`machine_learning_admin` and `machine_learning_user` roles provide these
privileges. For more information, see
{stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[[ml-get-bucket-desc]]
==== {api-description-title}

The get buckets API presents a chronological view of the records, grouped by
bucket.

[[ml-get-bucket-path-parms]]
==== {api-path-parms-title}

==== Path Parameters

`job_id`::
`<job_id>` (Required)::
(string) Identifier for the job

`timestamp`::
`<timestamp>` (Optional)::
(string) The timestamp of a single bucket result.
If you do not specify this optional parameter, the API returns information
If you do not specify this parameter, the API returns information
about all buckets.

[[ml-get-bucket-request-body]]
==== {api-request-body-title}

==== Request Body

`anomaly_score`::
`anomaly_score` (Optional)::
(double) Returns buckets with anomaly scores greater than or equal to this value.

`desc`::
`desc` (Optional)::
(boolean) If true, the buckets are sorted in descending order.

`end`::
`end` (Optional)::
(string) Returns buckets with timestamps earlier than this time.

`exclude_interim`::
`exclude_interim` (Optional)::
(boolean) If true, the output excludes interim results.
By default, interim results are included.

`expand`::
`expand` (Optional)::
(boolean) If true, the output includes anomaly records.

`page`::
`page` (Optional)::
`from`:::
(integer) Skips the specified number of buckets.
`size`:::
(integer) Specifies the maximum number of buckets to obtain.

`sort`::
`sort` (Optional)::
(string) Specifies the sort field for the requested buckets.
By default, the buckets are sorted by the `timestamp` field.

`start`::
`start` (Optional)::
(string) Returns buckets with timestamps after this time.
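
As a sketch of how these body parameters combine, the score threshold and page
size below are illustrative values, and the job name matches the example that
follows:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/it-ops-kpi/results/buckets
{
  "anomaly_score": 80,
  "sort": "anomaly_score",
  "desc": true,
  "page": { "from": 0, "size": 10 }
}
--------------------------------------------------
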
===== Results
[[ml-get-bucket-results]]
==== {api-response-body-title}

The API returns the following information:

@@ -73,18 +84,8 @@ The API returns the following information:
(array) An array of bucket objects. For more information, see
<<ml-results-buckets,Buckets>>.

==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. You also need `read` index privilege on the index
that stores the results. The `machine_learning_admin` and `machine_learning_user`
roles provide these privileges. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges] and
{xpack-ref}/built-in-roles.html[Built-in Roles].

==== Examples
[[ml-get-bucket-example]]
==== {api-examples-title}

The following example gets bucket information for the `it-ops-kpi` job:
@ -9,39 +9,49 @@
|
|||
Retrieves information about the scheduled events in calendars.

[[ml-get-calendar-event-request]]
==== {api-request-title}

`GET _ml/calendars/<calendar_id>/events` +

`GET _ml/calendars/_all/events`

[[ml-get-calendar-event-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-calendar-event-desc]]
==== {api-description-title}

You can get scheduled event information for a single calendar or for all
calendars by using `_all`.

[[ml-get-calendar-event-path-parms]]
==== {api-path-parms-title}

`<calendar_id>` (Required)::
(string) Identifier for the calendar.

[[ml-get-calendar-event-request-body]]
==== {api-request-body-title}

`end` (Optional)::
(string) Specifies to get events with timestamps earlier than this time.

`from` (Optional)::
(integer) Skips the specified number of events.

`size` (Optional)::
(integer) Specifies the maximum number of events to obtain.

`start` (Optional)::
(string) Specifies to get events with timestamps after this time.
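For instance, a paged request for events across all calendars could look like
this (a sketch; the paging values are arbitrary):

[source,js]
----
GET _ml/calendars/_all/events
{
  "from": 0,
  "size": 50
}
----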
[[ml-get-calendar-event-results]]
==== {api-response-body-title}

The API returns the following information:

`events`::
(array) An array of scheduled event resources.
For more information, see <<ml-event-resource>>.

[[ml-get-calendar-event-example]]
==== {api-examples-title}

The following example gets information about the scheduled events in the
`planned-outages` calendar:

Retrieves configuration information for calendars.

[[ml-get-calendar-request]]
==== {api-request-title}

`GET _ml/calendars/<calendar_id>` +

`GET _ml/calendars/_all`

[[ml-get-calendar-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-calendar-desc]]
==== {api-description-title}

You can get information for a single calendar or for all calendars by using
`_all`.

[[ml-get-calendar-path-parms]]
==== {api-path-parms-title}

`<calendar_id>` (Required)::
(string) Identifier for the calendar.

[[ml-get-calendar-request-body]]
==== {api-request-body-title}

`page` (Optional)::
`from`:::
(integer) Skips the specified number of calendars.
`size`:::
(integer) Specifies the maximum number of calendars to obtain.
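For example, a paged request over all calendars might look like this (a
sketch; the page size is arbitrary):

[source,js]
----
GET _ml/calendars/_all
{
  "page": { "from": 0, "size": 10 }
}
----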
[[ml-get-calendar-results]]
==== {api-response-body-title}

The API returns the following information:

`calendars`::
(array) An array of calendar resources.
For more information, see <<ml-calendar-resource>>.

[[ml-get-calendar-example]]
==== {api-examples-title}

The following example gets configuration information for the `planned-outages`
calendar:

Retrieves job results for one or more categories.

[[ml-get-category-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>/results/categories` +

`GET _ml/anomaly_detectors/<job_id>/results/categories/<category_id>`

[[ml-get-category-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also
need `read` index privilege on the index that stores the results. The
`machine_learning_admin` and `machine_learning_user` roles provide these
privileges. See {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[[ml-get-category-desc]]
==== {api-description-title}

For more information about categories, see
{stack-ov}/ml-configuring-categories.html[Categorizing log messages].

[[ml-get-category-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job.

`<category_id>` (Optional)::
(long) Identifier for the category. If you do not specify this parameter,
the API returns information about all categories in the job.

[[ml-get-category-request-body]]
==== {api-request-body-title}

`page` (Optional)::
`from`:::
(integer) Skips the specified number of categories.
`size`:::
(integer) Specifies the maximum number of categories to obtain.
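As an illustration, a paged request for the first few categories might look
like this (a sketch; the job name `esxi_log` is taken from the example below):

[source,js]
----
GET _ml/anomaly_detectors/esxi_log/results/categories
{
  "page": { "from": 0, "size": 5 }
}
----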
[[ml-get-category-results]]
==== {api-response-body-title}

The API returns the following information:

`categories`::
(array) An array of category objects. For more information, see
<<ml-results-categories,Categories>>.

[[ml-get-category-example]]
==== {api-examples-title}

The following example gets information about one category for the
`esxi_log` job:

Retrieves usage information for {dfeeds}.

[[ml-get-datafeed-stats-request]]
==== {api-request-title}

`GET _ml/datafeeds/<feed_id>/_stats` +

`GET _ml/datafeeds/_stats` +

`GET _ml/datafeeds/_all/_stats`

[[ml-get-datafeed-stats-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-datafeed-stats-desc]]
==== {api-description-title}

You can get statistics for multiple {dfeeds} in a single API request by using a
comma-separated list of {dfeeds} or a wildcard expression. You can get
statistics for all {dfeeds} by using `_all`, by specifying `*` as the
`<feed_id>`, or by omitting the `<feed_id>`.

If the {dfeed} is stopped, the only information you receive is the
`datafeed_id` and the `state`.

IMPORTANT: This API returns a maximum of 10,000 {dfeeds}.

[[ml-get-datafeed-stats-path-parms]]
==== {api-path-parms-title}

`<feed_id>` (Optional)::
(string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a
wildcard expression. If you do not specify one of these options, the API
returns statistics for all {dfeeds}.
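For instance, a wildcard expression can gather statistics for a family of
{dfeeds} in one call (a sketch; the `datafeed-*` pattern is illustrative):

[source,js]
----
GET _ml/datafeeds/datafeed-*/_stats
----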
[[ml-get-datafeed-stats-results]]
==== {api-response-body-title}

The API returns the following information:

`datafeeds`::
(array) An array of {dfeed} count objects.
For more information, see <<ml-datafeed-counts>>.

[[ml-get-datafeed-stats-example]]
==== {api-examples-title}

The following example gets usage information for the
`datafeed-total-requests` {dfeed}:

Retrieves configuration information for {dfeeds}.

[[ml-get-datafeed-request]]
==== {api-request-title}

`GET _ml/datafeeds/<feed_id>` +

`GET _ml/datafeeds/` +

`GET _ml/datafeeds/_all`

[[ml-get-datafeed-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-datafeed-desc]]
==== {api-description-title}

You can get information for multiple {dfeeds} in a single API request by using a
comma-separated list of {dfeeds} or a wildcard expression. You can get
information for all {dfeeds} by using `_all`, by specifying `*` as the
`<feed_id>`, or by omitting the `<feed_id>`.

IMPORTANT: This API returns a maximum of 10,000 {dfeeds}.

[[ml-get-datafeed-path-parms]]
==== {api-path-parms-title}

`<feed_id>` (Optional)::
(string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a
wildcard expression. If you do not specify one of these options, the API
returns information about all {dfeeds}.
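As an illustration, a comma-separated list retrieves several configurations in
one call (a sketch; the second {dfeed} name is hypothetical):

[source,js]
----
GET _ml/datafeeds/datafeed-total-requests,datafeed-farequote
----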
[[ml-get-datafeed-results]]
==== {api-response-body-title}

The API returns the following information:

`datafeeds`::
(array) An array of {dfeed} objects.
For more information, see <<ml-datafeed-resource>>.

[[ml-get-datafeed-example]]
==== {api-examples-title}

The following example gets configuration information for the
`datafeed-total-requests` {dfeed}:

Retrieves filters.

[[ml-get-filter-request]]
==== {api-request-title}

`GET _ml/filters/<filter_id>` +

`GET _ml/filters/`

[[ml-get-filter-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-filter-desc]]
==== {api-description-title}

You can get a single filter or all filters. For more information, see
{stack-ov}/ml-rules.html[Machine learning custom rules].

[[ml-get-filter-path-parms]]
==== {api-path-parms-title}

`<filter_id>` (Optional)::
(string) Identifier for the filter.

[[ml-get-filter-query-parms]]
==== {api-query-parms-title}

`from` (Optional)::
(integer) Skips the specified number of filters.

`size` (Optional)::
(integer) Specifies the maximum number of filters to obtain.
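Because `from` and `size` are query parameters, a paged request is a plain URL
(a sketch; the values are arbitrary):

[source,js]
----
GET _ml/filters?from=0&size=10
----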
[[ml-get-filter-results]]
==== {api-response-body-title}

The API returns the following information:

`filters`::
(array) An array of filter resources.
For more information, see <<ml-filter-resource>>.

[[ml-get-filter-example]]
==== {api-examples-title}

The following example gets configuration information for the `safe_domains`
filter:

Retrieves job results for one or more influencers.

[[ml-get-influencer-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>/results/influencers`

[[ml-get-influencer-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also
need `read` index privilege on the index that stores the results. The
`machine_learning_admin` and `machine_learning_user` roles provide these
privileges. See {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[[ml-get-influencer-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job.

[[ml-get-influencer-request-body]]
==== {api-request-body-title}

`desc` (Optional)::
(boolean) If true, the results are sorted in descending order.

`end` (Optional)::
(string) Returns influencers with timestamps earlier than this time.

`exclude_interim` (Optional)::
(boolean) If true, the output excludes interim results. By default, interim
results are included.

`influencer_score` (Optional)::
(double) Returns influencers with anomaly scores greater than or equal to this
value.

`page` (Optional)::
`from`:::
(integer) Skips the specified number of influencers.
`size`:::
(integer) Specifies the maximum number of influencers to obtain.

`sort` (Optional)::
(string) Specifies the sort field for the requested influencers. By default,
the influencers are sorted by the `influencer_score` value.

`start` (Optional)::
(string) Returns influencers with timestamps after this time.
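For instance, a request for the top-scoring influencers might combine the
threshold and sort options (a sketch; the job name `it_ops_new_kpi` comes from
the example below and the score threshold is arbitrary):

[source,js]
----
GET _ml/anomaly_detectors/it_ops_new_kpi/results/influencers
{
  "influencer_score": 75.0,
  "sort": "influencer_score",
  "desc": true,
  "page": { "from": 0, "size": 10 }
}
----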
[[ml-get-influencer-results]]
==== {api-response-body-title}

The API returns the following information:

`influencers`::
(array) An array of influencer objects.
For more information, see <<ml-results-influencers,Influencers>>.

[[ml-get-influencer-example]]
==== {api-examples-title}

The following example gets influencer information for the `it_ops_new_kpi` job:

Retrieves usage information for jobs.

[[ml-get-job-stats-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>/_stats` +

`GET _ml/anomaly_detectors/_stats` +

`GET _ml/anomaly_detectors/_all/_stats`

[[ml-get-job-stats-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-job-stats-desc]]
==== {api-description-title}

You can get statistics for multiple jobs in a single API request by using a
group name, a comma-separated list of jobs, or a wildcard expression. You can
get statistics for all jobs by using `_all`, by specifying `*` as the
`<job_id>`, or by omitting the `<job_id>`.

IMPORTANT: This API returns a maximum of 10,000 jobs.

[[ml-get-job-stats-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Optional)::
(string) An identifier for the job. It can be a job identifier, a group name,
or a wildcard expression. If you do not specify one of these options, the API
returns statistics for all jobs.
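For example, a group name or wildcard expression gathers statistics for
several jobs at once (a sketch; the `fare*` pattern is illustrative):

[source,js]
----
GET _ml/anomaly_detectors/fare*/_stats
----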
[[ml-get-job-stats-results]]
==== {api-response-body-title}

The API returns the following information:

`jobs`::
(array) An array of job statistics objects.
For more information, see <<ml-jobstats,Job Statistics>>.

[[ml-get-job-stats-example]]
==== {api-examples-title}

The following example gets usage information for the `farequote` job:

Retrieves configuration information for jobs.

[[ml-get-job-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>` +

`GET _ml/anomaly_detectors/_all`

[[ml-get-job-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-job-desc]]
==== {api-description-title}

You can get information for multiple jobs in a single API request by using a
group name, a comma-separated list of jobs, or a wildcard expression. You can
get information for all jobs by using `_all`, by specifying `*` as the
`<job_id>`, or by omitting the `<job_id>`.

IMPORTANT: This API returns a maximum of 10,000 jobs.

[[ml-get-job-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Optional)::
(string) Identifier for the job. It can be a job identifier, a group name,
or a wildcard expression. If you do not specify one of these options, the API
returns information for all jobs.
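As an illustration, the configuration of a single job is retrieved with a
plain GET (the job name `total-requests` is taken from the example below):

[source,js]
----
GET _ml/anomaly_detectors/total-requests
----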
[[ml-get-job-results]]
==== {api-response-body-title}

The API returns the following information:

`jobs`::
(array) An array of job resources.
For more information, see <<ml-job-resource,Job Resources>>.

[[ml-get-job-example]]
==== {api-examples-title}

The following example gets configuration information for the `total-requests` job:

Returns defaults and limits used by machine learning.

[[get-ml-info-request]]
==== {api-request-title}

`GET _ml/info`

[[get-ml-info-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. The
`machine_learning_admin` and `machine_learning_user` roles provide these
privileges. See {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[[get-ml-info-desc]]
==== {api-description-title}

This endpoint is designed to be used by a user interface that needs to fully
understand machine learning configurations where some options are not
specified, meaning that the defaults should be used. This endpoint may be used
to find out what those defaults are.

[[get-ml-info-example]]
==== {api-examples-title}

The endpoint takes no arguments:

Retrieves overall bucket results that summarize the
bucket results of multiple jobs.

[[ml-get-overall-buckets-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>/results/overall_buckets` +

`GET _ml/anomaly_detectors/_all/results/overall_buckets`

[[ml-get-overall-buckets-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also
need `read` index privilege on the index that stores the results. The
`machine_learning_admin` and `machine_learning_user` roles provide these
privileges. See {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[[ml-get-overall-buckets-desc]]
==== {api-description-title}

You can summarize the bucket results for all jobs by using `_all` or by
specifying `*` as the `<job_id>`.

You can use the `bucket_span` parameter
to request overall buckets that span longer than the largest job's `bucket_span`.
When set, the `overall_score` will be the max `overall_score` of the corresponding
overall buckets with a span equal to the largest job's `bucket_span`.

[[ml-get-overall-buckets-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job. It can be a job identifier, a group name, a
comma-separated list of jobs or groups, or a wildcard expression.

[[ml-get-overall-buckets-request-body]]
==== {api-request-body-title}

`allow_no_jobs` (Optional)::
(boolean) If `false` and the `job_id` does not match any job, an error is
returned. The default value is `true`.

`bucket_span` (Optional)::
(string) The span of the overall buckets. Must be greater than or equal
to the largest job's `bucket_span`. Defaults to the largest job's `bucket_span`.

`end` (Optional)::
(string) Returns overall buckets with timestamps earlier than this time.

`exclude_interim` (Optional)::
(boolean) If `true`, the output excludes interim overall buckets.
Overall buckets are interim if any of the job buckets within
the overall bucket interval are interim.
By default, interim results are included.

`overall_score` (Optional)::
(double) Returns overall buckets with overall scores greater than or equal to
this value.

`start` (Optional)::
(string) Returns overall buckets with timestamps after this time.

`top_n` (Optional)::
(integer) The number of top job bucket scores to be used in the
`overall_score` calculation. The default value is `1`.
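For instance, a request that summarizes hour-long spans across a family of
jobs might look like this (a sketch; the `job-*` pattern comes from the
example below and the threshold values are arbitrary):

[source,js]
----
GET _ml/anomaly_detectors/job-*/results/overall_buckets
{
  "overall_score": 80.0,
  "top_n": 2,
  "bucket_span": "1h",
  "exclude_interim": true
}
----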
[[ml-get-overall-buckets-results]]
==== {api-response-body-title}

The API returns the following information:

`overall_buckets`::
(array) An array of overall bucket objects. For more information, see
<<ml-results-overall-buckets,Overall Buckets>>.

[[ml-get-overall-buckets-example]]
==== {api-examples-title}

The following example gets overall buckets for jobs with IDs matching `job-*`:

Retrieves anomaly records for a job.

[[ml-get-record-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>/results/records`

[[ml-get-record-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also
need `read` index privilege on the index that stores the results. The
`machine_learning_admin` and `machine_learning_user` roles provide these
privileges. See {stack-ov}/security-privileges.html[Security privileges] and
{stack-ov}/built-in-roles.html[Built-in roles].

[[ml-get-record-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job.

[[ml-get-record-request-body]]
==== {api-request-body-title}

`desc` (Optional)::
(boolean) If true, the results are sorted in descending order.

`end` (Optional)::
(string) Returns records with timestamps earlier than this time.

`exclude_interim` (Optional)::
(boolean) If true, the output excludes interim results. By default, interim
results are included.

`page` (Optional)::
`from`:::
(integer) Skips the specified number of records.
`size`:::
(integer) Specifies the maximum number of records to obtain.

`record_score` (Optional)::
(double) Returns records with anomaly scores greater than or equal to this
value.

`sort` (Optional)::
(string) Specifies the sort field for the requested records. By default, the
records are sorted by the `anomaly_score` value.

`start` (Optional)::
(string) Returns records with timestamps after this time.
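As an illustration, a request for the most anomalous non-interim records might
look like this (a sketch; the job name `it-ops-kpi` comes from the example
below and the score threshold is arbitrary):

[source,js]
----
GET _ml/anomaly_detectors/it-ops-kpi/results/records
{
  "record_score": 90.0,
  "sort": "record_score",
  "desc": true,
  "exclude_interim": true
}
----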
[[ml-get-record-results]]
==== {api-response-body-title}

The API returns the following information:

`records`::
(array) An array of record objects. For more information, see
<<ml-results-records,Records>>.

[[ml-get-record-example]]
==== {api-examples-title}

The following example gets record information for the `it-ops-kpi` job:

Retrieves information about model snapshots.

[[ml-get-snapshot-request]]
==== {api-request-title}

`GET _ml/anomaly_detectors/<job_id>/model_snapshots` +

`GET _ml/anomaly_detectors/<job_id>/model_snapshots/<snapshot_id>`

[[ml-get-snapshot-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `monitor_ml`,
`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-get-snapshot-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job.

`<snapshot_id>` (Optional)::
(string) Identifier for the model snapshot. If you do not specify this
parameter, the API returns information about all model snapshots.

[[ml-get-snapshot-request-body]]
==== {api-request-body-title}

`desc` (Optional)::
(boolean) If true, the results are sorted in descending order.

`end` (Optional)::
(date) Returns snapshots with timestamps earlier than this time.

`from` (Optional)::
(integer) Skips the specified number of snapshots.

`size` (Optional)::
(integer) Specifies the maximum number of snapshots to obtain.

`sort` (Optional)::
(string) Specifies the sort field for the requested snapshots. By default, the
snapshots are sorted by their timestamp.

`start` (Optional)::
(string) Returns snapshots with timestamps after this time.
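For example, the most recent snapshots can be listed by sorting on the
timestamp in descending order (a sketch; the job name `it_ops_new_logs` comes
from the example below):

[source,js]
----
GET _ml/anomaly_detectors/it_ops_new_logs/model_snapshots
{
  "sort": "timestamp",
  "desc": true,
  "from": 0,
  "size": 5
}
----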
[[ml-get-snapshot-results]]
==== {api-response-body-title}

The API returns the following information:

`model_snapshots`::
(array) An array of model snapshot objects. For more information, see
<<ml-snapshot-resource,Model Snapshots>>.

[[ml-get-snapshot-example]]
==== {api-examples-title}

The following example gets model snapshot information for the
`it_ops_new_logs` job:

Opens one or more jobs.
A job must be opened in order for it to be ready to receive and analyze data.
A job can be opened and closed multiple times throughout its lifecycle.

[[ml-open-job-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/{job_id}/_open`

[[ml-open-job-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-open-job-desc]]
==== {api-description-title}

When you open a new job, it starts with an empty model.

When you open an existing job, the most recent model state is automatically
loaded. The job is ready to resume its analysis from where it left off, once new
data is received.

[[ml-open-job-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job.

[[ml-open-job-request-body]]
==== {api-request-body-title}

`timeout` (Optional)::
(time) Controls the time to wait until a job has opened.
The default value is 30 minutes.
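For instance, opening a job with a longer wait time could look like this (a
sketch; the job name `total-requests` is taken from the example below and the
timeout value is arbitrary):

[source,js]
----
POST _ml/anomaly_detectors/total-requests/_open
{
  "timeout": "35m"
}
----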
[[ml-open-job-example]]
==== {api-examples-title}

The following example opens the `total-requests` job and sets an optional
property:

Posts scheduled events in a calendar.

[[ml-post-calendar-event-request]]
==== {api-request-title}

`POST _ml/calendars/<calendar_id>/events`

[[ml-post-calendar-event-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-post-calendar-event-desc]]
==== {api-description-title}

This API accepts a list of {stack-ov}/ml-calendars.html[scheduled events], each
of which must have a start time, end time, and description.

[[ml-post-calendar-event-path-parms]]
==== {api-path-parms-title}

`<calendar_id>` (Required)::
(string) Identifier for the calendar.

[[ml-post-calendar-event-request-body]]
==== {api-request-body-title}

`events` (Required)::
(array) A list of one or more scheduled events. The event's start and end
times may be specified as integer milliseconds since the epoch or as a string
in ISO 8601 format. See <<ml-event-resource>>.
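A single event posted to the calendar might look like this (a sketch; the
field names follow <<ml-event-resource>>, and the epoch-millisecond times and
description are illustrative):

[source,js]
----
POST _ml/calendars/planned-outages/events
{
  "events": [
    {
      "description": "quarterly maintenance window",
      "start_time": 1513641600000,
      "end_time": 1513728000000
    }
  ]
}
----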
[[ml-post-calendar-event-example]]
==== {api-examples-title}

You can add scheduled events to the `planned-outages` calendar as follows:

Sends data to an anomaly detection job for analysis.

[[ml-post-data-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/<job_id>/_data`

[[ml-post-data-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-post-data-desc]]
==== {api-description-title}

The job must have a state of `open` to receive and process the data.

IMPORTANT: For each job, data can only be accepted from a single connection at
a time. It is not currently possible to post data to multiple jobs using
wildcards or a comma-separated list.

[[ml-post-data-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job.

[[ml-post-data-query-parms]]
==== {api-query-parms-title}

`reset_start` (Optional)::
(string) Specifies the start of the bucket resetting range.

`reset_end` (Optional)::
(string) Specifies the end of the bucket resetting range.
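As an illustration, a request that resets a bucket range while posting a
document might look like this (a sketch; the job name, the epoch-millisecond
range, and the document's fields are all illustrative):

[source,js]
----
POST _ml/anomaly_detectors/it_ops_new_kpi/_data?reset_start=1481471400000&reset_end=1481472000000
{"total": 12345, "time": 1481471700000}
----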
[[ml-post-data-request-body]]
==== {api-request-body-title}

A sequence of one or more JSON documents containing the data to be analyzed.
Only whitespace characters are permitted in between the documents.

[[ml-post-data-example]]
==== {api-examples-title}

The following example posts data from the `it_ops_new_kpi.json` file to the
`it_ops_new_kpi` job:

[source,js]
--------------------------------------------------
$ curl -s -H "Content-type: application/json"
-X POST http://localhost:9200/_ml/anomaly_detectors/it_ops_new_kpi/_data
--data-binary @it_ops_new_kpi.json
--------------------------------------------------

When the data is sent, you receive information about the operational progress of
the job. For example:

[source,js]
----
Previews a {dfeed}.

[[ml-preview-datafeed-request]]
==== {api-request-title}

`GET _ml/datafeeds/<datafeed_id>/_preview`

[[ml-preview-datafeed-prereqs]]
==== {api-prereq-title}

* If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`,
`manage_ml`, or `manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-preview-datafeed-desc]]
==== {api-description-title}

The preview {dfeeds} API returns the first "page" of results from the `search`
that is created by using the current {dfeed} settings. This preview shows the
structure of the data that will be passed to the anomaly detection engine.

IMPORTANT: When {es} {security-features} are enabled, the {dfeed} query is
previewed using the credentials of the user calling the preview {dfeed} API.
When the {dfeed} is started it runs the query using the roles of the last user
to create or update it. If the two sets of roles differ then the preview may
not accurately reflect what the {dfeed} will return when started. To avoid
such problems, the same user that creates/updates the {dfeed} should preview
it to ensure it is returning the expected data.

[[ml-preview-datafeed-path-parms]]
==== {api-path-parms-title}

`<datafeed_id>` (Required)::
(string) Identifier for the {dfeed}.

[[ml-preview-datafeed-example]]
==== {api-examples-title}

The following example obtains a preview of the `datafeed-farequote` {dfeed}:

Adds a job to a calendar.

[[ml-put-calendar-job-request]]
==== {api-request-title}

`PUT _ml/calendars/<calendar_id>/jobs/<job_id>`

[[ml-put-calendar-job-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-put-calendar-job-path-parms]]
==== {api-path-parms-title}

`<calendar_id>` (Required)::
(string) Identifier for the calendar.

`<job_id>` (Required)::
(string) An identifier for the job. It can be a job identifier, a group name,
or a comma-separated list of jobs or groups.
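For instance, linking the calendar and job named in the example below is a
single PUT with no body (a sketch):

[source,js]
----
PUT _ml/calendars/planned-outages/jobs/total-requests
----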
[[ml-put-calendar-job-example]]
==== {api-examples-title}

The following example associates the `planned-outages` calendar with the
`total-requests` job:

Instantiates a calendar.

[[ml-put-calendar-request]]
==== {api-request-title}

`PUT _ml/calendars/<calendar_id>`

[[ml-put-calendar-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-put-calendar-desc]]
==== {api-description-title}

For more information, see
{stack-ov}/ml-calendars.html[Calendars and Scheduled Events].

[[ml-put-calendar-path-parms]]
==== {api-path-parms-title}

`<calendar_id>` (Required)::
(string) Identifier for the calendar.

[[ml-put-calendar-request-body]]
==== {api-request-body-title}

`description` (Optional)::
(string) A description of the calendar.
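Creating a calendar with a description is a single request (a sketch; the
description text is illustrative):

[source,js]
----
PUT _ml/calendars/planned-outages
{
  "description": "planned outages for the ops cluster"
}
----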
[[ml-put-calendar-example]]
==== {api-examples-title}

The following example creates the `planned-outages` calendar:

Instantiates a {dfeed}.

[[ml-put-datafeed-request]]
==== {api-request-title}

`PUT _ml/datafeeds/<feed_id>`

[[ml-put-datafeed-prereqs]]
==== {api-prereq-title}

* If {es} {security-features} are enabled, you must have `manage_ml` or `manage`
cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-put-datafeed-desc]]
==== {api-description-title}

You must create a job before you create a {dfeed}. You can associate only one
{dfeed} to each job.

[IMPORTANT]
====
* You must use {kib} or this API to create a {dfeed}. Do not put a
{dfeed} directly to the `.ml-config` index using the {es} index API. If {es}
{security-features} are enabled, do not give users `write` privileges on the
`.ml-config` index.
* When {es} {security-features} are enabled, your {dfeed} remembers which roles
the user who created it had at the time of creation and runs the query using
those same roles.
====

[[ml-put-datafeed-path-parms]]
==== {api-path-parms-title}

`<feed_id>` (Required)::
(string) A numerical character string that uniquely identifies the {dfeed}.
This identifier can contain lowercase alphanumeric characters (a-z and 0-9),
hyphens, and underscores. It must start and end with alphanumeric characters.

[[ml-put-datafeed-request-body]]
==== {api-request-body-title}

`aggregations` (Optional)::
(object) If set, the {dfeed} performs aggregation searches.
For more information, see <<ml-datafeed-resource>>.

`chunking_config` (Optional)::
(object) Specifies how data searches are split into time chunks.
See <<ml-datafeed-chunking-config>>.

`delayed_data_check_config` (Optional)::
(object) Specifies whether the {dfeed} checks for missing data and
the size of the window. See
<<ml-datafeed-delayed-data-check-config>>.

`frequency` (Optional)::
(time units) The interval at which scheduled queries are made while the {dfeed}
runs in real time. The default value is either the bucket span for short
bucket spans, or, for longer bucket spans, a sensible fraction of the bucket
span. For example: `150s`.

`indices` (Required)::
(array) An array of index names. Wildcards are supported. For example:
`["it_ops_metrics", "server*"]`.

`job_id` (Required)::
(string) A numerical character string that uniquely identifies the job.

`query` (Optional)::
(object) The {es} query domain-specific language (DSL). This value
corresponds to the query object in an {es} search POST body. All the
options that are supported by {es} can be used, as this object is
passed verbatim to {es}. By default, this property has the following
value: `{"match_all": {"boost": 1}}`.

`query_delay` (Optional)::
(time units) The number of seconds behind real time that data is queried. For
example, if data from 10:04 a.m. might not be searchable in {es} until
10:06 a.m., set this property to 120 seconds. The default value is `60s`.

`script_fields` (Optional)::
(object) Specifies scripts that evaluate custom expressions and return
script fields to the {dfeed}.
The <<ml-detectorconfig,detector configuration objects>> in a job can contain
functions that use these script fields. For more information,
see {ref}/search-request-script-fields.html[Script Fields].

`scroll_size` (Optional)::
(unsigned integer) The `size` parameter that is used in {es} searches.
The default value is `1000`.

For more information about these properties,
see <<ml-datafeed-resource>>.
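As an illustration, a minimal {dfeed} needs only the job it feeds and the
indices it searches; the query shown is simply the documented default spelled
out (a sketch; the names reuse the example below and the index array reuses
the sample above):

[source,js]
----
PUT _ml/datafeeds/datafeed-total-requests
{
  "job_id": "total-requests",
  "indices": ["it_ops_metrics", "server*"],
  "query": { "match_all": { "boost": 1 } }
}
----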
[[ml-put-datafeed-example]]
==== {api-examples-title}

The following example creates the `datafeed-total-requests` {dfeed}:

Instantiates a filter.

[[ml-put-filter-request]]
==== {api-request-title}

`PUT _ml/filters/<filter_id>`

[[ml-put-filter-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-put-filter-desc]]
==== {api-description-title}

A {stack-ov}/ml-rules.html[filter] contains a list of strings.
It can be used by one or more jobs. Specifically, filters are referenced in
the `custom_rules` property of <<ml-detectorconfig,detector configuration objects>>.

[[ml-put-filter-path-parms]]
==== {api-path-parms-title}

`<filter_id>` (Required)::
(string) Identifier for the filter.

[[ml-put-filter-request-body]]
==== {api-request-body-title}

`description` (Optional)::
(string) A description of the filter.

`items` (Required)::
(array of strings) The items of the filter.
A wildcard `*` can be used at the beginning
or the end of an item. Up to 10,000 items
are allowed in each filter.
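For example, a small filter with a leading wildcard item could be created like
this (a sketch; the filter name `safe_domains` is taken from the example below
and the items are illustrative):

[source,js]
----
PUT _ml/filters/safe_domains
{
  "description": "A list of safe domains",
  "items": ["*.google.com", "wikipedia.org"]
}
----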
[[ml-put-filter-example]]
==== {api-examples-title}

The following example creates the `safe_domains` filter:

Instantiates a job.

[[ml-put-job-request]]
==== {api-request-title}

`PUT _ml/anomaly_detectors/<job_id>`

[[ml-put-job-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-put-job-desc]]
==== {api-description-title}

IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job
directly to the `.ml-config` index using the {es} index API.
If {es} {security-features} are enabled, do not give users `write`
privileges on the `.ml-config` index.

[[ml-put-job-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
(string) Identifier for the job. This identifier can contain lowercase
alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must
start and end with alphanumeric characters.

[[ml-put-job-request-body]]
==== {api-request-body-title}

`analysis_config` (Required)::
(object) The analysis configuration, which specifies how to analyze the data.
See <<ml-analysisconfig, analysis configuration objects>>.

`analysis_limits` (Optional)::
(object) Specifies runtime limits for the job. See
<<ml-apilimits,analysis limits>>.

`background_persist_interval` (Optional)::
(time units) Advanced configuration option. The time between each periodic
persistence of the model. See <<ml-job-resource>>.

`custom_settings` (Optional)::
(object) Advanced configuration option. Contains custom metadata about the
job. See <<ml-job-resource>>.

`data_description` (Required)::
(object) Describes the format of the input data. This object is required, but
it can be empty (`{}`). See <<ml-datadescription,data description objects>>.

`description` (Optional)::
(string) A description of the job.

`groups` (Optional)::
(array of strings) A list of job groups. See <<ml-job-resource>>.

`model_plot_config` (Optional)::
(object) Advanced configuration option. Specifies to store model information
along with the results. This adds overhead to the performance of the system
and is not feasible for jobs with many entities. See <<ml-apimodelplotconfig>>.

`model_snapshot_retention_days` (Optional)::
(long) The time in days that model snapshots are retained for the job.
Older snapshots are deleted. The default value is `1`, which means snapshots
are retained for one day (twenty-four hours).

`renormalization_window_days` (Optional)::
(long) Advanced configuration option. The period over which adjustments to the
score are applied, as new data is seen. See <<ml-job-resource>>.

`results_index_name` (Optional)::
(string) A text string that affects the name of the {ml} results index. The
default value is `shared`, which generates an index named `.ml-anomalies-shared`.

`results_retention_days` (Optional)::
(long) Advanced configuration option. The number of days for which job results
are retained. See <<ml-job-resource>>.
|
||||
|
||||
==== Authorization
|
||||
|
||||
You must have `manage_ml`, or `manage` cluster privileges to use this API.
|
||||
For more information, see
|
||||
{xpack-ref}/security-privileges.html[Security Privileges].
|
||||
|
||||
|
||||
==== Examples
|
||||
[[ml-put-job-example]]
|
||||
==== {api-examples-title}
|
||||
|
||||
The following example creates the `total-requests` job:
|
||||
|
||||
|
|
|
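
The request body is not shown in this hunk. As an illustration only, a minimal job of this shape might look like the following (the detector and field names are assumed):

[source,js]
----
PUT _ml/anomaly_detectors/total-requests
{
  "description": "Total sum of requests",
  "analysis_config": {
    "bucket_span": "10m",
    "detectors": [
      {
        "detector_description": "Sum of total",
        "function": "sum",
        "field_name": "total"
      }
    ]
  },
  "data_description": {
    "time_field": "timestamp",
    "time_format": "epoch_ms"
  }
}
----
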
@@ -8,12 +8,20 @@

Reverts to a specific snapshot.

==== Request
[[ml-revert-snapshot-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/<job_id>/model_snapshots/<snapshot_id>/_revert`

[[ml-revert-snapshot-prereqs]]
==== {api-prereq-title}

==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-revert-snapshot-desc]]
==== {api-description-title}

The {ml} feature in {xpack} reacts quickly to anomalous input, learning new
behaviors in data. Highly anomalous input increases the variance in the models
@@ -25,18 +33,19 @@ Friday or a critical system failure.

IMPORTANT: Before you revert to a saved snapshot, you must close the job.

[[ml-revert-snapshot-path-parms]]
==== {api-path-parms-title}

==== Path Parameters
`<job_id>` (Required)::
  (string) Identifier for the job.

`job_id` (required)::
  (string) Identifier for the job
`<snapshot_id>` (Required)::
  (string) Identifier for the model snapshot.

`snapshot_id` (required)::
  (string) Identifier for the model snapshot
[[ml-revert-snapshot-request-body]]
==== {api-request-body-title}

==== Request Body

`delete_intervening_results`::
`delete_intervening_results` (Optional)::
  (boolean) If true, deletes the results in the time period between the
  latest results and the time of the reverted snapshot. It also resets the
  model to accept records for this time period. The default value is false.
@@ -45,15 +54,8 @@ NOTE: If you choose not to delete intervening results when reverting a snapshot,

the job will not accept input data that is older than the current time.
If you want to resend data, then delete the intervening results.


==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].


==== Examples
[[ml-revert-snapshot-example]]
==== {api-examples-title}

The following example reverts to the `1491856080` snapshot for the
`it_ops_new_kpi` job:
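
The example body is outside this hunk. Given the path and body parameters above, the request would plausibly take this shape:

[source,js]
----
POST _ml/anomaly_detectors/it_ops_new_kpi/model_snapshots/1491856080/_revert
{
  "delete_intervening_results": true
}
----
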
@@ -9,7 +9,8 @@

Sets a cluster-wide upgrade_mode setting that prepares {ml} indices for an
upgrade.

==== Request
[[ml-set-upgrade-mode-request]]
==== {api-request-title}
//////////////////////////

[source,js]
@@ -25,7 +26,15 @@ POST /_ml/set_upgrade_mode?enabled=false&timeout=10m

`POST _ml/set_upgrade_mode`

==== Description
[[ml-set-upgrade-mode-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-set-upgrade-mode-desc]]
==== {api-description-title}

When upgrading your cluster, in some circumstances you must restart your nodes and
reindex your {ml} indices. In those circumstances, there must be no {ml} jobs running.
@@ -37,7 +46,6 @@ though stopping jobs is not a requirement in that case.

For more information, see {stack-ref}/upgrading-elastic-stack.html[Upgrading the {stack}].


When `enabled=true` this API temporarily halts all job and {dfeed} tasks and
prohibits new job and {dfeed} tasks from starting.
@@ -50,23 +58,18 @@ You can see the current value for the `upgrade_mode` setting by using the

IMPORTANT: No new {ml} jobs can be opened while the `upgrade_mode` setting is
`true`.

==== Query Parameters
[[ml-set-upgrade-mode-query-parms]]
==== {api-query-parms-title}

`enabled`::
`enabled` (Optional)::
  (boolean) When `true`, this enables `upgrade_mode`. Defaults to `false`.

`timeout`::
`timeout` (Optional)::
  (time) The time to wait for the request to be completed.
  The default value is 30 seconds.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{stack-ov}/security-privileges.html[Security privileges].


==== Examples
[[ml-set-upgrade-mode-example]]
==== {api-examples-title}

The following example enables `upgrade_mode` for the cluster:
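
The example request falls outside this hunk; given the query parameters above (and the `enabled=false` variant shown in the hidden test setup), it would be of this shape:

[source,js]
----
POST _ml/set_upgrade_mode?enabled=true&timeout=10m
----
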
@@ -12,11 +12,20 @@ Starts one or more {dfeeds}.

A {dfeed} must be started in order to retrieve data from {es}.
A {dfeed} can be started and stopped multiple times throughout its lifecycle.

==== Request
[[ml-start-datafeed-request]]
==== {api-request-title}

`POST _ml/datafeeds/<feed_id>/_start`

==== Description
[[ml-start-datafeed-prereqs]]
==== {api-prereq-title}

* If {es} {security-features} are enabled, you must have `manage_ml` or `manage`
cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-start-datafeed-desc]]
==== {api-description-title}

NOTE: Before you can start a {dfeed}, the job must be open. Otherwise, an error
occurs.
@@ -56,42 +65,33 @@ If you specify a `start` value that is earlier than the timestamp of the latest

processed record, the {dfeed} continues from 1 millisecond after the timestamp
of the latest processed record.

IMPORTANT: When {es} {security-features} are enabled, your {dfeed} remembers
which roles the last user to create or update it had at the time of
creation/update and runs the query using those same roles.

==== Path Parameters
[[ml-start-datafeed-path-parms]]
==== {api-path-parms-title}

`feed_id` (required)::
  (string) Identifier for the {dfeed}
`<feed_id>` (Required)::
  (string) Identifier for the {dfeed}.

==== Request Body
[[ml-start-datafeed-request-body]]
==== {api-request-body-title}

`end`::
`end` (Optional)::
  (string) The time that the {dfeed} should end. This value is exclusive.
  The default value is an empty string.

`start`::
`start` (Optional)::
  (string) The time that the {dfeed} should begin. This value is inclusive.
  The default value is an empty string.

`timeout`::
`timeout` (Optional)::
  (time) Controls the amount of time to wait until a {dfeed} starts.
  The default value is 20 seconds.


==== Authorization

If {es} {security-features} are enabled, you must have `manage_ml`, or `manage`
cluster privileges to use this API. For more information, see
{stack-ov}/security-privileges.html[Security Privileges].


==== Security integration

When {es} {security-features} are enabled, your {dfeed} remembers which roles the
last user to create or update it had at the time of creation/update and runs the
query using those same roles.


==== Examples
[[ml-start-datafeed-example]]
==== {api-examples-title}

The following example starts the `datafeed-it-ops-kpi` {dfeed}:
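
The example body lies outside this hunk. A sketch using the `start` body parameter documented above (the timestamp is illustrative):

[source,js]
----
POST _ml/datafeeds/datafeed-it-ops-kpi/_start
{
  "start": "2017-04-07T18:22:16Z"
}
----
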
@@ -10,10 +10,8 @@

Stops one or more {dfeeds}.

A {dfeed} that is stopped ceases to retrieve data from {es}.
A {dfeed} can be started and stopped multiple times throughout its lifecycle.

==== Request
[[ml-stop-datafeed-request]]
==== {api-request-title}

`POST _ml/datafeeds/<feed_id>/_stop` +
@@ -21,39 +19,42 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle.

`POST _ml/datafeeds/_all/_stop`

[[ml-stop-datafeed-prereqs]]
==== {api-prereq-title}

===== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-stop-datafeed-desc]]
==== {api-description-title}

A {dfeed} that is stopped ceases to retrieve data from {es}.
A {dfeed} can be started and stopped multiple times throughout its lifecycle.

You can stop multiple {dfeeds} in a single API request by using a
comma-separated list of {dfeeds} or a wildcard expression. You can close all
{dfeeds} by using `_all` or by specifying `*` as the `<feed_id>`.

[[ml-stop-datafeed-path-parms]]
==== {api-path-parms-title}

==== Path Parameters

`feed_id`::
`<feed_id>` (Required)::
  (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a
  wildcard expression.

[[ml-stop-datafeed-request-body]]
==== {api-request-body-title}

==== Request Body

`force`::
`force` (Optional)::
  (boolean) If true, the {dfeed} is stopped forcefully.

`timeout`::
`timeout` (Optional)::
  (time) Controls the amount of time to wait until a {dfeed} stops.
  The default value is 20 seconds.


==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].


==== Examples
[[ml-stop-datafeed-example]]
==== {api-examples-title}

The following example stops the `datafeed-total-requests` {dfeed}:
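
The example body falls outside this hunk. A sketch using the `timeout` body parameter documented above (the value is illustrative):

[source,js]
----
POST _ml/datafeeds/datafeed-total-requests/_stop
{
  "timeout": "30s"
}
----
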
@@ -10,62 +10,77 @@

Updates certain properties of a {dfeed}.

==== Request
[[ml-update-datafeed-request]]
==== {api-request-title}

`POST _ml/datafeeds/<feed_id>/_update`

===== Description
[[ml-update-datafeed-prereqs]]
==== {api-prereq-title}

NOTE: If you update the `delayed_data_check_config` property, you must stop and
* If {es} {security-features} are enabled, you must have `manage_ml`, or `manage`
cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-update-datafeed-desc]]
==== {api-description-title}

If you update the `delayed_data_check_config` property, you must stop and
start the {dfeed} for the change to be applied.

==== Path Parameters
IMPORTANT: When {es} {security-features} are enabled, your {dfeed} remembers
which roles the user who updated it had at the time of update and runs the query
using those same roles.

`feed_id` (required)::
  (string) Identifier for the {dfeed}
[[ml-update-datafeed-path-parms]]
==== {api-path-parms-title}

==== Request Body
`<feed_id>` (Required)::
  (string) Identifier for the {dfeed}.

[[ml-update-datafeed-request-body]]
==== {api-request-body-title}

The following properties can be updated after the {dfeed} is created:

`aggregations`::
`aggregations` (Optional)::
  (object) If set, the {dfeed} performs aggregation searches.
  For more information, see <<ml-datafeed-resource>>.

`chunking_config`::
`chunking_config` (Optional)::
  (object) Specifies how data searches are split into time chunks.
  See <<ml-datafeed-chunking-config>>.

`delayed_data_check_config`::
`delayed_data_check_config` (Optional)::
  (object) Specifies whether the data feed checks for missing data and
  the size of the window. See <<ml-datafeed-delayed-data-check-config>>.

`frequency`::
`frequency` (Optional)::
  (time units) The interval at which scheduled queries are made while the
  {dfeed} runs in real time. The default value is either the bucket span for short
  bucket spans, or, for longer bucket spans, a sensible fraction of the bucket
  span. For example: `150s`.

`indices`::
`indices` (Optional)::
  (array) An array of index names. Wildcards are supported. For example:
  `["it_ops_metrics", "server*"]`.

`job_id`::
`job_id` (Optional)::
  (string) A numerical character string that uniquely identifies the job.

`query`::
`query` (Optional)::
  (object) The {es} query domain-specific language (DSL). This value
  corresponds to the query object in an {es} search POST body. All the
  options that are supported by {es} can be used, as this object is
  passed verbatim to {es}. By default, this property has the following
  value: `{"match_all": {"boost": 1}}`.

`query_delay`::
`query_delay` (Optional)::
  (time units) The number of seconds behind real-time that data is queried. For
  example, if data from 10:04 a.m. might not be searchable in {es} until
  10:06 a.m., set this property to 120 seconds. The default value is `60s`.

`script_fields`::
`script_fields` (Optional)::
  (object) Specifies scripts that evaluate custom expressions and return
  script fields to the {dfeed}.
  The <<ml-detectorconfig,detector configuration objects>> in a job can contain
@@ -73,29 +88,15 @@ The following properties can be updated after the {dfeed} is created:

For more information,
see {ref}/search-request-script-fields.html[Script Fields].

`scroll_size`::
`scroll_size` (Optional)::
  (unsigned integer) The `size` parameter that is used in {es} searches.
  The default value is `1000`.

For more information about these properties,
see <<ml-datafeed-resource>>.


==== Authorization

If {es} {security-features} are enabled, you must have `manage_ml`, or `manage`
cluster privileges to use this API. For more information, see
{stack-ov}/security-privileges.html[Security Privileges].


==== Security Integration

When {es} {security-features} are enabled, your {dfeed} remembers which roles the
user who updated it had at the time of update and runs the query using those
same roles.


==== Examples
[[ml-update-datafeed-example]]
==== {api-examples-title}

The following example updates the query for the `datafeed-total-requests`
{dfeed} so that only log entries of error level are analyzed:
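
The example body is outside this hunk. Given the prose above, a query update of this shape would match it (the `level` field name is assumed):

[source,js]
----
POST _ml/datafeeds/datafeed-total-requests/_update
{
  "query": {
    "term": {
      "level": "error"
    }
  }
}
----
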
@@ -8,40 +8,41 @@

Updates the description of a filter, adds items, or removes items.

==== Request
[[ml-update-filter-request]]
==== {api-request-title}

`POST _ml/filters/<filter_id>/_update`

//==== Description
[[ml-update-filter-prereqs]]
==== {api-prereq-title}

==== Path Parameters
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

`filter_id` (required)::
[[ml-update-filter-path-parms]]
==== {api-path-parms-title}

`<filter_id>` (Required)::
  (string) Identifier for the filter.

[[ml-update-filter-request-body]]
==== {api-request-body-title}

==== Request Body

`description`::
`description` (Optional)::
  (string) A description for the filter. See <<ml-filter-resource>>.

`add_items`::
`add_items` (Optional)::
  (array of strings) The items to add to the filter.

`remove_items`::
`remove_items` (Optional)::
  (array of strings) The items to remove from the filter.

[[ml-update-filter-example]]
==== {api-examples-title}


==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].


==== Examples

You can change the description, add and remove items to the `safe_domains` filter as follows:
You can change the description, add and remove items to the `safe_domains`
filter as follows:

[source,js]
--------------------------------------------------
@@ -8,17 +8,27 @@

Updates certain properties of a job.

==== Request
[[ml-update-job-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/<job_id>/_update`

[[ml-update-job-prereqs]]
==== {api-prereq-title}

==== Path Parameters
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

`job_id` (required)::
  (string) Identifier for the job

==== Request Body
[[ml-update-job-path-parms]]
==== {api-path-parms-title}

`<job_id>` (Required)::
  (string) Identifier for the job.

[[ml-update-job-request-body]]
==== {api-request-body-title}

The following properties can be updated after the job is created:
@@ -86,14 +96,8 @@ A detector update object has the following properties:

No other detector property can be updated.

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].


==== Examples
[[ml-update-job-example]]
==== {api-examples-title}

The following example updates the `total-requests` job:
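
The example body lies outside this hunk. As an illustration only, an update request might take this shape (the property values are assumed, not taken from the diff):

[source,js]
----
POST _ml/anomaly_detectors/total-requests/_update
{
  "description": "An updated job",
  "groups": ["group1", "group2"],
  "model_plot_config": {
    "enabled": true
  }
}
----
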
@@ -8,45 +8,45 @@

Updates certain properties of a snapshot.

==== Request
[[ml-update-snapshot-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/<job_id>/model_snapshots/<snapshot_id>/_update`

[[ml-update-snapshot-prereqs]]
==== {api-prereq-title}

//==== Description
* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job
[[ml-update-snapshot-path-parms]]
==== {api-path-parms-title}

`snapshot_id` (required)::
  (string) Identifier for the model snapshot
`<job_id>` (Required)::
  (string) Identifier for the job.

==== Request Body
`<snapshot_id>` (Required)::
  (string) Identifier for the model snapshot.

[[ml-update-snapshot-request-body]]
==== {api-request-body-title}

The following properties can be updated after the model snapshot is created:

`description`::
  (string) An optional description of the model snapshot. For example,
`description` (Optional)::
  (string) A description of the model snapshot. For example,
  "Before black friday".

`retain`::
`retain` (Optional)::
  (boolean) If true, this snapshot will not be deleted during automatic cleanup
  of snapshots older than `model_snapshot_retention_days`.
  Note that this snapshot will still be deleted when the job is deleted.
  The default value is false.


==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.


==== Examples
[[ml-update-snapshot-example]]
==== {api-examples-title}

The following example updates the snapshot identified as `1491852978`:
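
The example body falls outside this hunk. A sketch using the body parameters documented above (the job name `it_ops_new_logs` is assumed here):

[source,js]
----
POST _ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update
{
  "description": "Before black friday",
  "retain": true
}
----
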
@@ -8,30 +8,32 @@

Validates detector configuration information.

==== Request
[[ml-valid-detector-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/_validate/detector`

==== Description
[[ml-valid-detector-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-valid-detector-desc]]
==== {api-description-title}

The validate detectors API enables you to validate the detector configuration
before you create a job.


==== Request Body
[[ml-valid-detector-request-body]]
==== {api-request-body-title}

For a list of the properties that you can specify in the body of this API,
see <<ml-detectorconfig,detector configuration objects>>.


==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].


==== Examples
[[ml-valid-detector-example]]
==== {api-examples-title}

The following example validates detector configuration information:
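
The example body is outside this hunk. As an illustration only, a detector configuration to validate might look like this (the function and field names are assumed):

[source,js]
----
POST _ml/anomaly_detectors/_validate/detector
{
  "function": "metric",
  "field_name": "responsetime",
  "by_field_name": "airline"
}
----
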
@@ -8,30 +8,32 @@

Validates job configuration information.

==== Request
[[ml-valid-job-request]]
==== {api-request-title}

`POST _ml/anomaly_detectors/_validate`

==== Description
[[ml-valid-job-prereqs]]
==== {api-prereq-title}

* If the {es} {security-features} are enabled, you must have `manage_ml` or
`manage` cluster privileges to use this API. See
{stack-ov}/security-privileges.html[Security privileges].

[[ml-valid-job-desc]]
==== {api-description-title}

The validate jobs API enables you to validate the job configuration before you
create the job.


==== Request Body
[[ml-valid-job-request-body]]
==== {api-request-body-title}

For a list of the properties that you can specify in the body of this API,
see <<ml-job-resource,Job Resources>>.


==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].


==== Examples
[[ml-valid-job-example]]
==== {api-examples-title}

The following example validates job configuration information:
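
The example body is outside this hunk. As an illustration only, a job configuration to validate might take this shape (all property values here are assumed):

[source,js]
----
POST _ml/anomaly_detectors/_validate
{
  "description": "Unusual response times by airlines",
  "analysis_config": {
    "bucket_span": "300s",
    "detectors": [
      {
        "function": "metric",
        "field_name": "responsetime",
        "by_field_name": "airline"
      }
    ]
  },
  "data_description": {
    "time_field": "time",
    "time_format": "yyyy-MM-dd'T'HH:mm:ssX"
  }
}
----
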
@@ -1,36 +1,60 @@

[[query-dsl-boosting-query]]
=== Boosting Query

The `boosting` query can be used to effectively demote results that
match a given query. Unlike the "NOT" clause in bool query, this still
selects documents that contain undesirable terms, but reduces their
overall score.
Returns documents matching a `positive` query while reducing the
<<query-filter-context, relevance score>> of documents that also match a
`negative` query.

It accepts a `positive` query and a `negative` query.
Only documents that match the `positive` query will be included
in the results list, but documents that also match the `negative` query
will be downgraded by multiplying the original `_score` of the document
with the `negative_boost`.
You can use the `boosting` query to demote certain documents without
excluding them from the search results.

[[boosting-query-ex-request]]
==== Example request

[source,js]
--------------------------------------------------
----
GET /_search
{
    "query": {
        "boosting" : {
            "positive" : {
                "term" : {
                    "field1" : "value1"
                    "text" : "apple"
                }
            },
            "negative" : {
                "term" : {
                    "field2" : "value2"
                    "text" : "pie tart fruit crumble tree"
                }
            },
            "negative_boost" : 0.2
            "negative_boost" : 0.5
        }
    }
}
--------------------------------------------------
----
// CONSOLE

[[boosting-top-level-params]]
==== Top-level parameters for `boosting`

`positive` (Required)::
Query you wish to run. Any returned documents must match this query.

`negative` (Required)::
+
--
Query used to decrease the <<query-filter-context, relevance score>> of matching
documents.

If a returned document matches the `positive` query and this query, the
`boosting` query calculates the final <<query-filter-context, relevance score>>
for the document as follows:

. Take the original relevance score from the `positive` query.
. Multiply the score by the `negative_boost` value.
--

`negative_boost` (Required)::
Floating point number between `0` and `1.0` used to decrease the
<<query-filter-context, relevance scores>> of documents matching the `negative`
query.
@@ -101,6 +101,34 @@ If specified, then match intervals from this field rather than the top-level field.

The `prefix` will be normalized using the search analyzer from this field, unless
`analyzer` is specified separately.

[[intervals-wildcard]]
==== `wildcard`

The `wildcard` rule finds terms that match a wildcard pattern. The pattern will
expand to match at most 128 terms; if there are more matching terms in the index,
then an error will be returned.

[horizontal]
`pattern`::
Find terms matching this pattern
+
--
This parameter supports two wildcard operators:

* `?`, which matches any single character
* `*`, which can match zero or more characters, including an empty one

WARNING: Avoid beginning patterns with `*` or `?`. This can increase
the iterations needed to find matching terms and slow search performance.
--
`analyzer`::
Which analyzer should be used to normalize the `pattern`. By default, the
search analyzer of the top-level field will be used.
`use_field`::
If specified, then match intervals from this field rather than the top-level field.
The `pattern` will be normalized using the search analyzer from this field, unless
`analyzer` is specified separately.
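
No example accompanies the `wildcard` rule in this hunk. As an illustration only, a sketch of an `intervals` query using it (the `my_text` field and pattern are assumed):

[source,js]
----
POST _search
{
  "query": {
    "intervals" : {
      "my_text" : {
        "wildcard" : {
          "pattern" : "te*m"
        }
      }
    }
  }
}
----
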
[[intervals-all_of]]
==== `all_of`
@@ -1,121 +1,231 @@

[[query-dsl-terms-set-query]]
=== Terms Set Query

Returns any documents that match with at least one or more of the
provided terms. The terms are not analyzed and thus must match exactly.
The number of terms that must match varies per document and is either
controlled by a minimum should match field or computed per document in
a minimum should match script.
Returns documents that contain a minimum number of *exact* terms in a provided
field.

The field that controls the number of required terms that must match must
be a number field:
The `terms_set` query is the same as the <<query-dsl-terms-query, `terms`
query>>, except you can define the number of matching terms required to
return a document. For example:

* A field, `programming_languages`, contains a list of known programming
languages, such as `c++`, `java`, or `php` for job candidates. You can use the
`terms_set` query to return documents that match at least two of these
languages.

* A field, `permissions`, contains a list of possible user permissions for an
application. You can use the `terms_set` query to return documents that
match a subset of these permissions.

[[terms-set-query-ex-request]]
==== Example request

[[terms-set-query-ex-request-index-setup]]
===== Index setup
In most cases, you'll need to include a <<number, numeric>> field mapping in
your index to use the `terms_set` query. This numeric field contains the
number of matching terms required to return a document.

To see how you can set up an index for the `terms_set` query, try the
following example.

. Create an index, `job-candidates`, with the following field mappings:
+
--

* `name`, a <<keyword, `keyword`>> field. This field contains the name of the
job candidate.

* `programming_languages`, a <<keyword, `keyword`>> field. This field contains
programming languages known by the job candidate.

* `required_matches`, a <<number, numeric>> `long` field. This field contains
the number of matching terms required to return a document.

[source,js]
--------------------------------------------------
PUT /my-index
----
PUT /job-candidates
{
    "mappings": {
        "properties": {
            "name": {
                "type": "keyword"
            },
            "programming_languages": {
                "type": "keyword"
            },
            "required_matches": {
                "type": "long"
            }
        }
    }
}

PUT /my-index/_doc/1?refresh
{
    "codes": ["ghi", "jkl"],
    "required_matches": 2
}

PUT /my-index/_doc/2?refresh
{
    "codes": ["def", "ghi"],
    "required_matches": 2
}
--------------------------------------------------
----
// CONSOLE
// TESTSETUP

An example that uses the minimum should match field:
--

. Index a document with an ID of `1` and the following values:
+
--

* `Jane Smith` in the `name` field.

* `["c++", "java"]` in the `programming_languages` field.

* `2` in the `required_matches` field.

Include the `?refresh` parameter so the document is immediately available for
search.

[source,js]
--------------------------------------------------
GET /my-index/_search
----
PUT /job-candidates/_doc/1?refresh
{
    "name": "Jane Smith",
    "programming_languages": ["c++", "java"],
    "required_matches": 2
}
----
// CONSOLE

--

. Index another document with an ID of `2` and the following values:
+
--

* `Jason Response` in the `name` field.

* `["java", "php"]` in the `programming_languages` field.

* `2` in the `required_matches` field.

[source,js]
----
PUT /job-candidates/_doc/2?refresh
{
    "name": "Jason Response",
    "programming_languages": ["java", "php"],
    "required_matches": 2
}
----
// CONSOLE

--

You can now use the `required_matches` field value as the number of
matching terms required to return a document in the `terms_set` query.

[[terms-set-query-ex-request-query]]
===== Example query

The following search returns documents where the `programming_languages` field
contains at least two of the following terms:

* `c++`
* `java`
* `php`

The `minimum_should_match_field` is `required_matches`. This means the
number of matching terms required is `2`, the value of the `required_matches`
field.

[source,js]
----
GET /job-candidates/_search
{
    "query": {
        "terms_set": {
            "codes" : {
                "terms" : ["abc", "def", "ghi"],
            "programming_languages": {
                "terms": ["c++", "java", "php"],
                "minimum_should_match_field": "required_matches"
            }
        }
    }
}
--------------------------------------------------
----
// CONSOLE

Response:
[[terms-set-top-level-params]]
==== Top-level parameters for `terms_set`

`<field>`::
Field you wish to search.

[[terms-set-field-params]]
==== Parameters for `<field>`

`terms`::
+
--
Array of terms you wish to find in the provided `<field>`. To return a document,
a required number of terms must exactly match the field values, including
whitespace and capitalization.

The required number of matching terms is defined in the
`minimum_should_match_field` or `minimum_should_match_script` parameter.
--

`minimum_should_match_field`::
<<number, Numeric>> field containing the number of matching terms
required to return a document.

`minimum_should_match_script`::
+
--
Custom script containing the number of matching terms required to return a
document.

For parameters and valid values, see <<modules-scripting, Scripting>>.

For an example query using the `minimum_should_match_script` parameter, see
<<terms-set-query-script, How to use the `minimum_should_match_script`
parameter>>.
--

[[terms-set-query-notes]]
==== Notes

[[terms-set-query-script]]
===== How to use the `minimum_should_match_script` parameter
You can use `minimum_should_match_script` to define the required number of
matching terms using a script. This is useful if you need to set the number of
required terms dynamically.

[[terms-set-query-script-ex]]
====== Example query using `minimum_should_match_script`

The following search returns documents where the `programming_languages` field
contains at least two of the following terms:

* `c++`
* `java`
* `php`

The `source` parameter of this query indicates:

* The required number of terms to match cannot exceed `params.num_terms`, the
number of terms provided in the `terms` field.
* The required number of terms to match is `2`, the value of the
`required_matches` field.

[source,js]
--------------------------------------------------
{
  "took": 13,
  "timed_out": false,
  "_shards": {
    "total": 1,
    "successful": 1,
    "skipped" : 0,
    "failed": 0
  },
  "hits": {
    "total" : {
      "value": 1,
      "relation": "eq"
    },
    "max_score": 0.87546873,
    "hits": [
      {
        "_index": "my-index",
        "_type": "_doc",
        "_id": "2",
        "_score": 0.87546873,
        "_source": {
          "codes": ["def", "ghi"],
          "required_matches": 2
        }
      }
    ]
  }
}
--------------------------------------------------
// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/]

Scripts can also be used to control how many terms are required to match
in a more dynamic way. For example a create date or a popularity field
can be used as basis for the number of required terms to match.

Also the `params.num_terms` parameter is available in the script to indicate the
number of terms that have been specified.

An example that always limits the number of required terms to match to never
become larger than the number of terms specified:

[source,js]
--------------------------------------------------
GET /my-index/_search
----
GET /job-candidates/_search
{
    "query": {
        "terms_set": {
            "codes" : {
                "terms" : ["abc", "def", "ghi"],
            "programming_languages": {
                "terms": ["c++", "java", "php"],
                "minimum_should_match_script": {
                    "source": "Math.min(params.num_terms, doc['required_matches'].value)"
                }
            },
                "boost": 1.0
            }
        }
    }
}
--------------------------------------------------
// CONSOLE
----
// CONSOLE
@@ -15,6 +15,7 @@ not be included yet.

* <<data-frame-apis,{dataframe-cap} APIs>>
* <<graph-explore-api,Graph Explore API>>
* <<freeze-index-api>>, <<unfreeze-index-api>>
* <<indices-reload-analyzers,Reload Search Analyzers API>>
* <<index-lifecycle-management-api,Index lifecycle management APIs>>
* <<licensing-apis,Licensing APIs>>
* <<ml-apis,Machine Learning APIs>>

@@ -38,4 +39,5 @@ include::{es-repo-dir}/rollup/rollup-api.asciidoc[]

include::{xes-repo-dir}/rest-api/security.asciidoc[]
include::{es-repo-dir}/indices/apis/unfreeze.asciidoc[]
include::{xes-repo-dir}/rest-api/watcher.asciidoc[]
include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[]
include::defs.asciidoc[]
@@ -49,8 +49,6 @@ public class Circle implements Geometry {

        if (radiusMeters < 0) {
            throw new IllegalArgumentException("Circle radius [" + radiusMeters + "] cannot be negative");
        }
        GeometryUtils.checkLatitude(lat);
        GeometryUtils.checkLongitude(lon);
    }

    @Override
@@ -1,78 +0,0 @@

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.geo.geometry;

/**
 * Geometry-related utility methods
 */
public final class GeometryUtils {
    /**
     * Minimum longitude value.
     */
    static final double MIN_LON_INCL = -180.0D;

    /**
     * Maximum longitude value.
     */
    static final double MAX_LON_INCL = 180.0D;

    /**
     * Minimum latitude value.
     */
    static final double MIN_LAT_INCL = -90.0D;

    /**
     * Maximum latitude value.
     */
    static final double MAX_LAT_INCL = 90.0D;

    // No instance:
    private GeometryUtils() {
    }

    /**
     * validates latitude value is within standard +/-90 coordinate bounds
     */
    static void checkLatitude(double latitude) {
        if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) {
            throw new IllegalArgumentException(
                "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL);
        }
    }

    /**
     * validates longitude value is within standard +/-180 coordinate bounds
     */
    static void checkLongitude(double longitude) {
        if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) {
            throw new IllegalArgumentException(
                "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL);
        }
    }

    public static double checkAltitude(final boolean ignoreZValue, double zValue) {
        if (ignoreZValue == false) {
            throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] "
                + "parameter is [" + ignoreZValue + "]");
        }
        return zValue;
    }

}
@@ -59,10 +59,6 @@ public class Line implements Geometry {

        if (alts != null && alts.length != lats.length) {
            throw new IllegalArgumentException("alts and lats must be equal length");
        }
        for (int i = 0; i < lats.length; i++) {
            GeometryUtils.checkLatitude(lats[i]);
            GeometryUtils.checkLongitude(lons[i]);
        }
    }

    public int length() {

@@ -42,8 +42,6 @@ public class Point implements Geometry {

    }

    public Point(double lat, double lon, double alt) {
        GeometryUtils.checkLatitude(lat);
        GeometryUtils.checkLongitude(lon);
        this.lat = lat;
        this.lon = lon;
        this.alt = alt;

@@ -71,10 +71,6 @@ public class Rectangle implements Geometry {

     * Constructs a bounding box by first validating the provided latitude and longitude coordinates
     */
    public Rectangle(double minLat, double maxLat, double minLon, double maxLon, double minAlt, double maxAlt) {
        GeometryUtils.checkLatitude(minLat);
        GeometryUtils.checkLatitude(maxLat);
        GeometryUtils.checkLongitude(minLon);
        GeometryUtils.checkLongitude(maxLon);
        this.minLon = minLon;
        this.maxLon = maxLon;
        this.minLat = minLat;

@@ -90,17 +86,6 @@ public class Rectangle implements Geometry {

        }
    }

    public double getWidth() {
        if (crossesDateline()) {
            return GeometryUtils.MAX_LON_INCL - minLon + maxLon - GeometryUtils.MIN_LON_INCL;
        }
        return maxLon - minLon;
    }

    public double getHeight() {
        return maxLat - minLat;
    }

    public double getMinLat() {
        return minLat;
    }

@@ -156,21 +141,6 @@ public class Rectangle implements Geometry {

        return b.toString();
    }

    /**
     * Returns true if this bounding box crosses the dateline
     */
    public boolean crossesDateline() {
        return maxLon < minLon;
    }

    /** returns true if rectangle (defined by minLat, maxLat, minLon, maxLon) contains the lat lon point */
    public boolean containsPoint(final double lat, final double lon) {
        if (lat >= minLat && lat <= maxLat) {
            return crossesDateline() ? lon >= minLon || lon <= maxLon : lon >= minLon && lon <= maxLon;
        }
        return false;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
@@ -0,0 +1,178 @@

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.geo.utils;

import org.elasticsearch.geo.geometry.Circle;
import org.elasticsearch.geo.geometry.Geometry;
import org.elasticsearch.geo.geometry.GeometryCollection;
import org.elasticsearch.geo.geometry.GeometryVisitor;
import org.elasticsearch.geo.geometry.Line;
import org.elasticsearch.geo.geometry.LinearRing;
import org.elasticsearch.geo.geometry.MultiLine;
import org.elasticsearch.geo.geometry.MultiPoint;
import org.elasticsearch.geo.geometry.MultiPolygon;
import org.elasticsearch.geo.geometry.Point;
import org.elasticsearch.geo.geometry.Polygon;
import org.elasticsearch.geo.geometry.Rectangle;

/**
 * Validator that checks that lats are between -90 and +90 and lons are between -180 and +180 and altitude is present only if
 * ignoreZValue is set to true
 */
public class GeographyValidator implements GeometryValidator {

    /**
     * Minimum longitude value.
     */
    private static final double MIN_LON_INCL = -180.0D;

    /**
     * Maximum longitude value.
     */
    private static final double MAX_LON_INCL = 180.0D;

    /**
     * Minimum latitude value.
     */
    private static final double MIN_LAT_INCL = -90.0D;

    /**
     * Maximum latitude value.
     */
    private static final double MAX_LAT_INCL = 90.0D;

    private final boolean ignoreZValue;

    public GeographyValidator(boolean ignoreZValue) {
        this.ignoreZValue = ignoreZValue;
    }

    /**
     * validates latitude value is within standard +/-90 coordinate bounds
     */
    protected void checkLatitude(double latitude) {
        if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) {
            throw new IllegalArgumentException(
                "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL);
        }
    }

    /**
     * validates longitude value is within standard +/-180 coordinate bounds
     */
    protected void checkLongitude(double longitude) {
        if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) {
            throw new IllegalArgumentException(
                "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL);
        }
    }

    protected void checkAltitude(double zValue) {
        if (ignoreZValue == false && Double.isNaN(zValue) == false) {
            throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] "
                + "parameter is [" + ignoreZValue + "]");
        }
    }

    @Override
    public void validate(Geometry geometry) {
        geometry.visit(new GeometryVisitor<Void, RuntimeException>() {

            @Override
            public Void visit(Circle circle) throws RuntimeException {
                checkLatitude(circle.getLat());
                checkLongitude(circle.getLon());
                checkAltitude(circle.getAlt());
                return null;
            }

            @Override
            public Void visit(GeometryCollection<?> collection) throws RuntimeException {
                for (Geometry g : collection) {
                    g.visit(this);
                }
                return null;
            }

            @Override
            public Void visit(Line line) throws RuntimeException {
                for (int i = 0; i < line.length(); i++) {
                    checkLatitude(line.getLat(i));
                    checkLongitude(line.getLon(i));
                    checkAltitude(line.getAlt(i));
                }
                return null;
            }

            @Override
            public Void visit(LinearRing ring) throws RuntimeException {
                for (int i = 0; i < ring.length(); i++) {
                    checkLatitude(ring.getLat(i));
                    checkLongitude(ring.getLon(i));
                    checkAltitude(ring.getAlt(i));
                }
                return null;
            }

            @Override
            public Void visit(MultiLine multiLine) throws RuntimeException {
                return visit((GeometryCollection<?>) multiLine);
            }

            @Override
            public Void visit(MultiPoint multiPoint) throws RuntimeException {
                return visit((GeometryCollection<?>) multiPoint);
            }

            @Override
            public Void visit(MultiPolygon multiPolygon) throws RuntimeException {
                return visit((GeometryCollection<?>) multiPolygon);
            }

            @Override
            public Void visit(Point point) throws RuntimeException {
                checkLatitude(point.getLat());
                checkLongitude(point.getLon());
                checkAltitude(point.getAlt());
                return null;
            }

            @Override
            public Void visit(Polygon polygon) throws RuntimeException {
                polygon.getPolygon().visit(this);
                for (int i = 0; i < polygon.getNumberOfHoles(); i++) {
                    polygon.getHole(i).visit(this);
                }
                return null;
            }

            @Override
            public Void visit(Rectangle rectangle) throws RuntimeException {
                checkLatitude(rectangle.getMinLat());
                checkLatitude(rectangle.getMaxLat());
                checkLongitude(rectangle.getMinLon());
                checkLongitude(rectangle.getMaxLon());
                checkAltitude(rectangle.getMinAlt());
                checkAltitude(rectangle.getMaxAlt());
                return null;
            }
        });
    }
}
@@ -17,30 +17,18 @@

 * under the License.
 */

package org.apache.lucene.search;
package org.elasticsearch.geo.utils;

import org.apache.lucene.index.LeafReaderContext;

import java.io.IOException;
import java.util.List;
import org.elasticsearch.geo.geometry.Geometry;

/**
 * A wrapper for {@link IndexSearcher} that makes {@link IndexSearcher#search(List, Weight, Collector)}
 * visible by sub-classes.
 * Generic geometry validator that can be used by the parser to verify the validity of the parsed geometry
 */
public class XIndexSearcher extends IndexSearcher {
    private final IndexSearcher in;
public interface GeometryValidator {

    public XIndexSearcher(IndexSearcher in) {
        super(in.getIndexReader());
        this.in = in;
        setSimilarity(in.getSimilarity());
        setQueryCache(in.getQueryCache());
        setQueryCachingPolicy(in.getQueryCachingPolicy());
    }
    /**
     * Validates the geometry and throws IllegalArgumentException if the geometry is not valid
     */
    void validate(Geometry geometry);

    @Override
    public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
        in.search(leaves, weight, collector);
    }
}
@ -22,7 +22,6 @@ package org.elasticsearch.geo.utils;
|
|||
import org.elasticsearch.geo.geometry.Circle;
|
||||
import org.elasticsearch.geo.geometry.Geometry;
|
||||
import org.elasticsearch.geo.geometry.GeometryCollection;
|
||||
import org.elasticsearch.geo.geometry.GeometryUtils;
|
||||
import org.elasticsearch.geo.geometry.GeometryVisitor;
|
||||
import org.elasticsearch.geo.geometry.Line;
|
||||
import org.elasticsearch.geo.geometry.LinearRing;
|
||||
|
@ -58,11 +57,11 @@ public class WellKnownText {
|
|||
private final String EOL = "END-OF-LINE";
|
||||
|
||||
private final boolean coerce;
|
||||
private final boolean ignoreZValue;
|
||||
private final GeometryValidator validator;
|
||||
|
||||
public WellKnownText(boolean coerce, boolean ignoreZValue) {
|
||||
public WellKnownText(boolean coerce, GeometryValidator validator) {
|
||||
this.coerce = coerce;
|
||||
this.ignoreZValue = ignoreZValue;
|
||||
this.validator = validator;
|
||||
}
|
||||
|
||||
public String toWKT(Geometry geometry) {
|
||||
|
@ -243,7 +242,9 @@ public class WellKnownText {
|
|||
tokenizer.whitespaceChars('\r', '\r');
|
||||
tokenizer.whitespaceChars('\n', '\n');
|
||||
tokenizer.commentChar('#');
|
||||
return parseGeometry(tokenizer);
|
||||
Geometry geometry = parseGeometry(tokenizer);
|
||||
validator.validate(geometry);
|
||||
return geometry;
|
||||
} finally {
|
||||
reader.close();
|
||||
}
|
||||
|

@@ -297,7 +298,7 @@ public class WellKnownText {
        double lat = nextNumber(stream);
        Point pt;
        if (isNumberNext(stream)) {
            pt = new Point(lat, lon, GeometryUtils.checkAltitude(ignoreZValue, nextNumber(stream)));
            pt = new Point(lat, lon, nextNumber(stream));
        } else {
            pt = new Point(lat, lon);
        }

@@ -318,7 +319,7 @@ public class WellKnownText {
        lons.add(nextNumber(stream));
        lats.add(nextNumber(stream));
        if (isNumberNext(stream)) {
            alts.add(GeometryUtils.checkAltitude(ignoreZValue, nextNumber(stream)));
            alts.add(nextNumber(stream));
        }
        if (alts.isEmpty() == false && alts.size() != lons.size()) {
            throw new ParseException("coordinate dimensions do not match: " + tokenString(stream), stream.lineno());
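
Both hunks above drop the inline GeometryUtils.checkAltitude call: the tokenizer now stores Z values as parsed, and rejecting them becomes the validator's job. A sketch of the strict behaviour, mirroring LineTests.testWKTValidation below (wrapper class name illustrative):

import java.io.IOException;
import java.text.ParseException;
import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.WellKnownText;

class StrictZValueSketch {
    static void rejectZ() throws IOException, ParseException {
        // GeographyValidator(false): Z values are not ignored, so they fail validation.
        WellKnownText strict = new WellKnownText(true, new GeographyValidator(false));
        strict.fromWKT("linestring (3 1 6, 4 2 5)");
        // throws IllegalArgumentException:
        // found Z value [6.0] but [ignore_z_value] parameter is [false]
    }
}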

@@ -22,6 +22,7 @@ package org.elasticsearch.geo.geometry;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.WellKnownText;
import org.elasticsearch.test.AbstractWireTestCase;

@@ -53,7 +54,7 @@ abstract class BaseGeometryTestCase<T extends Geometry> extends AbstractWireTest
    @SuppressWarnings("unchecked")
    @Override
    protected T copyInstance(T instance, Version version) throws IOException {
        WellKnownText wkt = new WellKnownText(true, true);
        WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
        String text = wkt.toWKT(instance);
        try {
            return (T) wkt.fromWKT(text);
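
The same toWKT/fromWKT round trip the test base class relies on works anywhere; a small sketch with a permissive geography validator (wrapper class and method names illustrative, checked exceptions assumed as above):

import java.io.IOException;
import java.text.ParseException;
import org.elasticsearch.geo.geometry.Point;
import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.WellKnownText;

class RoundTripSketch {
    static Point roundTrip() throws IOException, ParseException {
        WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
        Point original = new Point(10, 20);              // lat 10, lon 20
        return (Point) wkt.fromWKT(wkt.toWKT(original)); // "point (20.0 10.0)" and back
    }
}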

@@ -19,6 +19,8 @@

package org.elasticsearch.geo.geometry;

import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.GeometryValidator;
import org.elasticsearch.geo.utils.WellKnownText;

import java.io.IOException;

@@ -36,7 +38,7 @@ public class CircleTests extends BaseGeometryTestCase<Circle> {
    }

    public void testBasicSerialization() throws IOException, ParseException {
        WellKnownText wkt = new WellKnownText(true, true);
        WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
        assertEquals("circle (20.0 10.0 15.0)", wkt.toWKT(new Circle(10, 20, 15)));
        assertEquals(new Circle(10, 20, 15), wkt.fromWKT("circle (20.0 10.0 15.0)"));

@@ -48,13 +50,14 @@ public class CircleTests extends BaseGeometryTestCase<Circle> {
    }

    public void testInitValidation() {
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Circle(10, 20, -1));
        GeometryValidator validator = new GeographyValidator(true);
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(10, 20, -1)));
        assertEquals("Circle radius [-1.0] cannot be negative", ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class, () -> new Circle(100, 20, 1));
        ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(100, 20, 1)));
        assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class, () -> new Circle(10, 200, 1));
        ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(10, 200, 1)));
        assertEquals("invalid longitude 200.0; must be between -180.0 and 180.0", ex.getMessage());
    }
}
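
The test rewrite above captures the behavioural change of this diff: constructing an out-of-range geometry no longer throws; only validation does. A short sketch of the new contract (wrapper class name illustrative):

import org.elasticsearch.geo.geometry.Circle;
import org.elasticsearch.geo.utils.GeographyValidator;

class ConstructorContractSketch {
    static void contractChange() {
        Circle c = new Circle(100, 20, 1);        // constructs fine now: no range check
        new GeographyValidator(true).validate(c); // throws: invalid latitude 100.0
    }
}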

@@ -19,6 +19,7 @@

package org.elasticsearch.geo.geometry;

import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.WellKnownText;

import java.io.IOException;

@@ -35,7 +36,7 @@ public class GeometryCollectionTests extends BaseGeometryTestCase<GeometryCollec

    public void testBasicSerialization() throws IOException, ParseException {
        WellKnownText wkt = new WellKnownText(true, true);
        WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
        assertEquals("geometrycollection (point (20.0 10.0),point EMPTY)",
            wkt.toWKT(new GeometryCollection<Geometry>(Arrays.asList(new Point(10, 20), Point.EMPTY))));

@@ -0,0 +1,127 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.geo.geometry;

import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.GeometryValidator;
import org.elasticsearch.geo.utils.WellKnownText;
import org.elasticsearch.test.ESTestCase;

public class GeometryValidatorTests extends ESTestCase {

    public static class NoopValidator implements GeometryValidator {

        @Override
        public void validate(Geometry geometry) {

        }
    }

    public static class OneValidator extends GeographyValidator {
        /**
         * Minimum longitude value.
         */
        private static final double MIN_LON_INCL = -1D;

        /**
         * Maximum longitude value.
         */
        private static final double MAX_LON_INCL = 1D;

        /**
         * Minimum latitude value.
         */
        private static final double MIN_LAT_INCL = -1D;

        /**
         * Maximum latitude value.
         */
        private static final double MAX_LAT_INCL = 1D;

        /**
         * Minimum altitude value.
         */
        private static final double MIN_ALT_INCL = -1D;

        /**
         * Maximum altitude value.
         */
        private static final double MAX_ALT_INCL = 1D;

        public OneValidator() {
            super(true);
        }

        @Override
        protected void checkLatitude(double latitude) {
            if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) {
                throw new IllegalArgumentException(
                    "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL);
            }
        }

        @Override
        protected void checkLongitude(double longitude) {
            if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) {
                throw new IllegalArgumentException(
                    "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL);
            }
        }

        @Override
        protected void checkAltitude(double zValue) {
            if (Double.isNaN(zValue) == false && (zValue < MIN_ALT_INCL || zValue > MAX_ALT_INCL)) {
                throw new IllegalArgumentException(
                    "invalid altitude " + zValue + "; must be between " + MIN_ALT_INCL + " and " + MAX_ALT_INCL);
            }
        }
    }

    public void testNoopValidator() throws Exception {
        WellKnownText parser = new WellKnownText(true, new NoopValidator());
        parser.fromWKT("CIRCLE (10000 20000 30000)");
        parser.fromWKT("POINT (10000 20000)");
        parser.fromWKT("LINESTRING (10000 20000, 0 0)");
        parser.fromWKT("POLYGON ((300 100, 400 200, 500 300, 300 100), (50 150, 250 150, 200 100))");
        parser.fromWKT("MULTIPOINT (10000 20000, 20000 30000)");
    }

    public void testOneValidator() throws Exception {
        WellKnownText parser = new WellKnownText(true, new OneValidator());
        parser.fromWKT("POINT (0 1)");
        parser.fromWKT("POINT (0 1 0.5)");
        IllegalArgumentException ex;
        ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("CIRCLE (1 2 3)"));
        assertEquals("invalid latitude 2.0; must be between -1.0 and 1.0", ex.getMessage());
        ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("POINT (2 1)"));
        assertEquals("invalid longitude 2.0; must be between -1.0 and 1.0", ex.getMessage());
        ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("LINESTRING (1 -1 0, 0 0 2)"));
        assertEquals("invalid altitude 2.0; must be between -1.0 and 1.0", ex.getMessage());
        ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("POLYGON ((0.3 0.1, 0.4 0.2, 5 0.3, 0.3 0.1))"));
        assertEquals("invalid longitude 5.0; must be between -1.0 and 1.0", ex.getMessage());
        ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT(
            "POLYGON ((0.3 0.1, 0.4 0.2, 0.5 0.3, 0.3 0.1), (0.5 1.5, 2.5 1.5, 2.0 1.0))"));
        assertEquals("invalid latitude 1.5; must be between -1.0 and 1.0", ex.getMessage());
        ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("MULTIPOINT (0 1, -2 1)"));
        assertEquals("invalid longitude -2.0; must be between -1.0 and 1.0", ex.getMessage());
    }
}
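
OneValidator shows the intended extension point: subclass GeographyValidator and override the protected checkLatitude/checkLongitude/checkAltitude hooks. For throwaway policies there is a shorter route; a hedged sketch, relying only on GeometryValidator having a single abstract method (so a lambda is a valid implementation), with illustrative names:

import org.elasticsearch.geo.utils.GeometryValidator;
import org.elasticsearch.geo.utils.WellKnownText;

class LambdaValidatorSketch {
    static WellKnownText rejectingParser() {
        // Rejects everything, e.g. to assert a code path never parses geometry.
        GeometryValidator rejectAll = geometry -> {
            throw new IllegalArgumentException("no geometry expected here");
        };
        return new WellKnownText(true, rejectAll);
    }
}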

@@ -19,6 +19,8 @@

package org.elasticsearch.geo.geometry;

import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.GeometryValidator;
import org.elasticsearch.geo.utils.WellKnownText;

import java.io.IOException;

@@ -31,7 +33,7 @@ public class LineTests extends BaseGeometryTestCase<Line> {
    }

    public void testBasicSerialization() throws IOException, ParseException {
        WellKnownText wkt = new WellKnownText(true, true);
        WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
        assertEquals("linestring (3.0 1.0, 4.0 2.0)", wkt.toWKT(new Line(new double[]{1, 2}, new double[]{3, 4})));
        assertEquals(new Line(new double[]{1, 2}, new double[]{3, 4}), wkt.fromWKT("linestring (3 1, 4 2)"));

@@ -45,19 +47,23 @@ public class LineTests extends BaseGeometryTestCase<Line> {
    }

    public void testInitValidation() {
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1}, new double[]{3}));
        GeometryValidator validator = new GeographyValidator(true);
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
            () -> validator.validate(new Line(new double[]{1}, new double[]{3})));
        assertEquals("at least two points in the line is required", ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3}));
        ex = expectThrows(IllegalArgumentException.class,
            () -> validator.validate(new Line(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3})));
        assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}));
        ex = expectThrows(IllegalArgumentException.class,
            () -> validator.validate(new Line(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3})));
        assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage());
    }

    public void testWKTValidation() {
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
            () -> new WellKnownText(randomBoolean(), false).fromWKT("linestring (3 1 6, 4 2 5)"));
            () -> new WellKnownText(randomBoolean(), new GeographyValidator(false)).fromWKT("linestring (3 1 6, 4 2 5)"));
        assertEquals("found Z value [6.0] but [ignore_z_value] parameter is [false]", ex.getMessage());
    }
}
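
Lines get the same treatment as circles: arity and range checks fire in validate(), not in the Line constructor. A compact sketch of both outcomes (coordinate arrays are lats first, then lons, matching the tests above; the wrapper class is illustrative):

import org.elasticsearch.geo.geometry.Line;
import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.GeometryValidator;

class LineValidationSketch {
    static void lineChecks() {
        GeometryValidator validator = new GeographyValidator(true);
        validator.validate(new Line(new double[]{1, 2}, new double[]{3, 4})); // passes
        validator.validate(new Line(new double[]{1}, new double[]{3}));       // throws: at least two points required
    }
}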

@@ -19,6 +19,8 @@

package org.elasticsearch.geo.geometry;

import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.GeometryValidator;
import org.elasticsearch.geo.utils.WellKnownText;
import org.elasticsearch.test.ESTestCase;

@@ -26,30 +28,35 @@ public class LinearRingTests extends ESTestCase {

    public void testBasicSerialization() {
        UnsupportedOperationException ex = expectThrows(UnsupportedOperationException.class,
            () -> new WellKnownText(true, true).toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})));
            () -> new WellKnownText(true, new GeographyValidator(true))
                .toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})));
        assertEquals("line ring cannot be serialized using WKT", ex.getMessage());
    }

    public void testInitValidation() {
        GeometryValidator validator = new GeographyValidator(true);
        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
            () -> new LinearRing(new double[]{1, 2, 3}, new double[]{3, 4, 5}));
            () -> validator.validate(new LinearRing(new double[]{1, 2, 3}, new double[]{3, 4, 5})));
        assertEquals("first and last points of the linear ring must be the same (it must close itself): lats[0]=1.0 lats[2]=3.0 " +
            "lons[0]=3.0 lons[2]=5.0",
            ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class,
            () -> new LinearRing(new double[]{1, 2, 1}, new double[]{3, 4, 3}, new double[]{1, 2, 3}));
            () -> validator.validate(new LinearRing(new double[]{1, 2, 1}, new double[]{3, 4, 3}, new double[]{1, 2, 3})));
        assertEquals("first and last points of the linear ring must be the same (it must close itself): lats[0]=1.0 lats[2]=1.0 " +
            "lons[0]=3.0 lons[2]=3.0 alts[0]=1.0 alts[2]=3.0",
            ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1}, new double[]{3}));
        ex = expectThrows(IllegalArgumentException.class,
            () -> validator.validate(new LinearRing(new double[]{1}, new double[]{3})));
        assertEquals("at least two points in the line is required", ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3}));
        ex = expectThrows(IllegalArgumentException.class,
            () -> validator.validate(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3})));
        assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage());

        ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}));
        ex = expectThrows(IllegalArgumentException.class,
            () -> validator.validate(new LinearRing(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3})));
        assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage());
    }
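
The closure rule is the one check unique to rings: first and last points must coincide in every dimension present. A minimal sketch based on the test data above (wrapper class name illustrative):

import org.elasticsearch.geo.geometry.LinearRing;
import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.GeometryValidator;

class RingClosureSketch {
    static void ringChecks() {
        GeometryValidator validator = new GeographyValidator(true);
        // Closed: first point (lat 1, lon 3) equals last point (lat 1, lon 3); passes.
        validator.validate(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}));
        // Open: (1, 3) vs (3, 5); throws "must close itself".
        validator.validate(new LinearRing(new double[]{1, 2, 3}, new double[]{3, 4, 5}));
    }
}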

@@ -19,6 +19,7 @@

package org.elasticsearch.geo.geometry;

import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.WellKnownText;

import java.io.IOException;

@@ -40,7 +41,7 @@ public class MultiLineTests extends BaseGeometryTestCase<MultiLine> {
    }

    public void testBasicSerialization() throws IOException, ParseException {
        WellKnownText wkt = new WellKnownText(true, true);
        WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
        assertEquals("multilinestring ((3.0 1.0, 4.0 2.0))", wkt.toWKT(
            new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4})))));
        assertEquals(new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}))),

@@ -19,6 +19,7 @@

package org.elasticsearch.geo.geometry;

import org.elasticsearch.geo.utils.GeographyValidator;
import org.elasticsearch.geo.utils.WellKnownText;

import java.io.IOException;

@@ -41,7 +42,7 @@ public class MultiPointTests extends BaseGeometryTestCase<MultiPoint> {
    }

    public void testBasicSerialization() throws IOException, ParseException {
        WellKnownText wkt = new WellKnownText(true, true);
        WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
        assertEquals("multipoint (2.0 1.0)", wkt.toWKT(
            new MultiPoint(Collections.singletonList(new Point(1, 2)))));
        assertEquals(new MultiPoint(Collections.singletonList(new Point(1, 2))),