Merge branch '7.x' of https://github.com/elastic/elasticsearch into 7.x
commit bb932c8581
@ -673,3 +673,9 @@ which you can use to measure the performance impact. It comes with a set of
|
|||
default benchmarks that we also
|
||||
https://elasticsearch-benchmarks.elastic.co/[run every night]. To get started,
|
||||
please see https://esrally.readthedocs.io/en/stable/[Rally's documentation].
|
||||
|
||||
== Test doc builds
|
||||
|
||||
The Elasticsearch docs are in AsciiDoc format. You can test and build the docs
|
||||
locally using the Elasticsearch documentation build process. See
|
||||
https://github.com/elastic/docs.
|
|
@ -170,13 +170,23 @@ public class LazyPropertyList<T> extends AbstractLazyPropertyCollection implemen
|
|||
}
|
||||
|
||||
@Override
|
||||
public List<? extends Object> getNormalizedCollection() {
|
||||
public List<? extends PropertyListEntry<T>> getNormalizedCollection() {
|
||||
return delegate.stream()
|
||||
.peek(this::validate)
|
||||
.filter(entry -> entry.getNormalization() != PropertyNormalization.IGNORE_VALUE)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a "flattened" collection. This should be used when the collection type is itself a complex type with properties
|
||||
* annotated as Gradle inputs rather than a simple type like {@link String}.
|
||||
*
|
||||
* @return a flattened collection filtered according to normalization strategy
|
||||
*/
|
||||
public List<? extends T> getFlatNormalizedCollection() {
|
||||
return getNormalizedCollection().stream().map(PropertyListEntry::getValue).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private void validate(PropertyListEntry<T> entry) {
|
||||
assertNotNull(entry.getValue(), "entry");
|
||||
}
|
||||
|
|
|
@ -1188,7 +1188,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
|
|||
|
||||
@Nested
|
||||
public List<?> getCliSetup() {
|
||||
return cliSetup.getNormalizedCollection();
|
||||
return cliSetup.getFlatNormalizedCollection();
|
||||
}
|
||||
|
||||
@Nested
|
||||
|
|
|
@ -54,6 +54,10 @@ import org.elasticsearch.action.search.SearchScrollRequest;
|
|||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.action.update.UpdateRequest;
|
||||
import org.elasticsearch.action.update.UpdateResponse;
|
||||
import org.elasticsearch.client.analytics.ParsedStringStats;
|
||||
import org.elasticsearch.client.analytics.ParsedTopMetrics;
|
||||
import org.elasticsearch.client.analytics.StringStatsAggregationBuilder;
|
||||
import org.elasticsearch.client.analytics.TopMetricsAggregationBuilder;
|
||||
import org.elasticsearch.client.core.CountRequest;
|
||||
import org.elasticsearch.client.core.CountResponse;
|
||||
import org.elasticsearch.client.core.GetSourceRequest;
|
||||
|
@ -1926,6 +1930,8 @@ public class RestHighLevelClient implements Closeable {
|
|||
map.put(IpRangeAggregationBuilder.NAME, (p, c) -> ParsedBinaryRange.fromXContent(p, (String) c));
|
||||
map.put(TopHitsAggregationBuilder.NAME, (p, c) -> ParsedTopHits.fromXContent(p, (String) c));
|
||||
map.put(CompositeAggregationBuilder.NAME, (p, c) -> ParsedComposite.fromXContent(p, (String) c));
|
||||
map.put(StringStatsAggregationBuilder.NAME, (p, c) -> ParsedStringStats.PARSER.parse(p, (String) c));
|
||||
map.put(TopMetricsAggregationBuilder.NAME, (p, c) -> ParsedTopMetrics.PARSER.parse(p, (String) c));
|
||||
List<NamedXContentRegistry.Entry> entries = map.entrySet().stream()
|
||||
.map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
|
||||
.collect(Collectors.toList());
|
||||
|
|
|
@ -0,0 +1,172 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.analytics;
|
||||
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.search.aggregations.ParsedAggregation;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
|
||||
|
||||
/**
|
||||
* Results from the {@code string_stats} aggregation.
|
||||
*/
|
||||
public class ParsedStringStats extends ParsedAggregation {
|
||||
private static final ParseField COUNT_FIELD = new ParseField("count");
|
||||
private static final ParseField MIN_LENGTH_FIELD = new ParseField("min_length");
|
||||
private static final ParseField MAX_LENGTH_FIELD = new ParseField("max_length");
|
||||
private static final ParseField AVG_LENGTH_FIELD = new ParseField("avg_length");
|
||||
private static final ParseField ENTROPY_FIELD = new ParseField("entropy");
|
||||
private static final ParseField DISTRIBUTION_FIELD = new ParseField("distribution");
|
||||
|
||||
private final long count;
|
||||
private final int minLength;
|
||||
private final int maxLength;
|
||||
private final double avgLength;
|
||||
private final double entropy;
|
||||
private final boolean showDistribution;
|
||||
private final Map<String, Double> distribution;
|
||||
|
||||
private ParsedStringStats(String name, long count, int minLength, int maxLength, double avgLength, double entropy,
|
||||
boolean showDistribution, Map<String, Double> distribution) {
|
||||
setName(name);
|
||||
this.count = count;
|
||||
this.minLength = minLength;
|
||||
this.maxLength = maxLength;
|
||||
this.avgLength = avgLength;
|
||||
this.entropy = entropy;
|
||||
this.showDistribution = showDistribution;
|
||||
this.distribution = distribution;
|
||||
}
|
||||
|
||||
/**
|
||||
* The number of non-empty fields counted.
|
||||
*/
|
||||
public long getCount() {
|
||||
return count;
|
||||
}
|
||||
|
||||
/**
|
||||
* The length of the shortest term.
|
||||
*/
|
||||
public int getMinLength() {
|
||||
return minLength;
|
||||
}
|
||||
|
||||
/**
|
||||
* The length of the longest term.
|
||||
*/
|
||||
public int getMaxLength() {
|
||||
return maxLength;
|
||||
}
|
||||
|
||||
/**
|
||||
* The average length computed over all terms.
|
||||
*/
|
||||
public double getAvgLength() {
|
||||
return avgLength;
|
||||
}
|
||||
|
||||
/**
|
||||
* The <a href="https://en.wikipedia.org/wiki/Entropy_(information_theory)">Shannon Entropy</a>
|
||||
* value computed over all terms collected by the aggregation.
|
||||
* Shannon entropy quantifies the amount of information contained in
|
||||
* the field. It is a very useful metric for measuring a wide range of
|
||||
* properties of a data set, such as diversity, similarity,
|
||||
* randomness etc.
|
||||
*/
|
||||
public double getEntropy() {
|
||||
return entropy;
|
||||
}
|
||||
|
||||
/**
|
||||
* The probability distribution for all characters. {@code null} unless
|
||||
* explicitly requested with {@link StringStatsAggregationBuilder#showDistribution(boolean)}.
|
||||
*/
|
||||
public Map<String, Double> getDistribution() {
|
||||
return distribution;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getType() {
|
||||
return StringStatsAggregationBuilder.NAME;
|
||||
}
|
||||
|
||||
private static final Object NULL_DISTRIBUTION_MARKER = new Object();
|
||||
public static final ConstructingObjectParser<ParsedStringStats, String> PARSER = new ConstructingObjectParser<>(
|
||||
StringStatsAggregationBuilder.NAME, true, (args, name) -> {
|
||||
long count = (long) args[0];
|
||||
boolean distributionWasExplicitNull = args[5] == NULL_DISTRIBUTION_MARKER;
|
||||
if (count == 0) {
|
||||
return new ParsedStringStats(name, count, 0, 0, 0, 0, distributionWasExplicitNull, null);
|
||||
}
|
||||
int minLength = (int) args[1];
|
||||
int maxLength = (int) args[2];
|
||||
double averageLength = (double) args[3];
|
||||
double entropy = (double) args[4];
|
||||
if (distributionWasExplicitNull) {
|
||||
return new ParsedStringStats(name, count, minLength, maxLength, averageLength, entropy,
|
||||
distributionWasExplicitNull, null);
|
||||
} else {
|
||||
@SuppressWarnings("unchecked")
|
||||
Map<String, Double> distribution = (Map<String, Double>) args[5];
|
||||
return new ParsedStringStats(name, count, minLength, maxLength, averageLength, entropy,
|
||||
distribution != null, distribution);
|
||||
}
|
||||
});
|
||||
static {
|
||||
PARSER.declareLong(constructorArg(), COUNT_FIELD);
|
||||
PARSER.declareIntOrNull(constructorArg(), 0, MIN_LENGTH_FIELD);
|
||||
PARSER.declareIntOrNull(constructorArg(), 0, MAX_LENGTH_FIELD);
|
||||
PARSER.declareDoubleOrNull(constructorArg(), 0, AVG_LENGTH_FIELD);
|
||||
PARSER.declareDoubleOrNull(constructorArg(), 0, ENTROPY_FIELD);
|
||||
PARSER.declareObjectOrNull(optionalConstructorArg(), (p, c) -> unmodifiableMap(p.map(HashMap::new, XContentParser::doubleValue)),
|
||||
NULL_DISTRIBUTION_MARKER, DISTRIBUTION_FIELD);
|
||||
ParsedAggregation.declareAggregationFields(PARSER);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(COUNT_FIELD.getPreferredName(), count);
|
||||
if (count == 0) {
|
||||
builder.nullField(MIN_LENGTH_FIELD.getPreferredName());
|
||||
builder.nullField(MAX_LENGTH_FIELD.getPreferredName());
|
||||
builder.nullField(AVG_LENGTH_FIELD.getPreferredName());
|
||||
builder.field(ENTROPY_FIELD.getPreferredName(), 0.0);
|
||||
} else {
|
||||
builder.field(MIN_LENGTH_FIELD.getPreferredName(), minLength);
|
||||
builder.field(MAX_LENGTH_FIELD.getPreferredName(), maxLength);
|
||||
builder.field(AVG_LENGTH_FIELD.getPreferredName(), avgLength);
|
||||
builder.field(ENTROPY_FIELD.getPreferredName(), entropy);
|
||||
}
|
||||
if (showDistribution) {
|
||||
builder.field(DISTRIBUTION_FIELD.getPreferredName(), distribution);
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,134 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.analytics;
|
||||
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParserUtils;
|
||||
import org.elasticsearch.search.aggregations.ParsedAggregation;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
|
||||
|
||||
/**
|
||||
* Results of the {@code top_metrics} aggregation.
|
||||
*/
|
||||
public class ParsedTopMetrics extends ParsedAggregation {
|
||||
private static final ParseField TOP_FIELD = new ParseField("top");
|
||||
|
||||
private final List<TopMetrics> topMetrics;
|
||||
|
||||
private ParsedTopMetrics(String name, List<TopMetrics> topMetrics) {
|
||||
setName(name);
|
||||
this.topMetrics = topMetrics;
|
||||
}
|
||||
|
||||
/**
|
||||
* The list of top metrics, in sorted order.
|
||||
*/
|
||||
public List<TopMetrics> getTopMetrics() {
|
||||
return topMetrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getType() {
|
||||
return TopMetricsAggregationBuilder.NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray(TOP_FIELD.getPreferredName());
|
||||
for (TopMetrics top : topMetrics) {
|
||||
top.toXContent(builder, params);
|
||||
}
|
||||
return builder.endArray();
|
||||
}
|
||||
|
||||
public static final ConstructingObjectParser<ParsedTopMetrics, String> PARSER = new ConstructingObjectParser<>(
|
||||
TopMetricsAggregationBuilder.NAME, true, (args, name) -> {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<TopMetrics> topMetrics = (List<TopMetrics>) args[0];
|
||||
return new ParsedTopMetrics(name, topMetrics);
|
||||
});
|
||||
static {
|
||||
PARSER.declareObjectArray(constructorArg(), (p, c) -> TopMetrics.PARSER.parse(p, null), TOP_FIELD);
|
||||
ParsedAggregation.declareAggregationFields(PARSER);
|
||||
}
|
||||
|
||||
/**
|
||||
* The metrics belonging to the document with the "top" sort key.
|
||||
*/
|
||||
public static class TopMetrics implements ToXContent {
|
||||
private static final ParseField SORT_FIELD = new ParseField("sort");
|
||||
private static final ParseField METRICS_FIELD = new ParseField("metrics");
|
||||
|
||||
private final List<Object> sort;
|
||||
private final Map<String, Double> metrics;
|
||||
|
||||
private TopMetrics(List<Object> sort, Map<String, Double> metrics) {
|
||||
this.sort = sort;
|
||||
this.metrics = metrics;
|
||||
}
|
||||
|
||||
/**
|
||||
* The sort key for these top metrics.
|
||||
*/
|
||||
public List<Object> getSort() {
|
||||
return sort;
|
||||
}
|
||||
|
||||
/**
|
||||
* The top metric values returned by the aggregation.
|
||||
*/
|
||||
public Map<String, Double> getMetrics() {
|
||||
return metrics;
|
||||
}
|
||||
|
||||
private static final ConstructingObjectParser<TopMetrics, Void> PARSER = new ConstructingObjectParser<>("top", true,
|
||||
(args, name) -> {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<Object> sort = (List<Object>) args[0];
|
||||
@SuppressWarnings("unchecked")
|
||||
Map<String, Double> metrics = (Map<String, Double>) args[1];
|
||||
return new TopMetrics(sort, metrics);
|
||||
});
|
||||
static {
|
||||
PARSER.declareFieldArray(constructorArg(), (p, c) -> XContentParserUtils.parseFieldsValue(p),
|
||||
SORT_FIELD, ObjectParser.ValueType.VALUE_ARRAY);
|
||||
PARSER.declareObject(constructorArg(), (p, c) -> p.map(HashMap::new, XContentParser::doubleValue), METRICS_FIELD);
|
||||
}
|
||||
|
||||
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(SORT_FIELD.getPreferredName(), sort);
|
||||
builder.field(METRICS_FIELD.getPreferredName(), metrics);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
};
|
||||
}
|
||||
}
|
|
@ -0,0 +1,116 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.analytics;
|
||||
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.query.QueryRewriteContext;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
|
||||
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Builds the {@code string_stats} aggregation request.
|
||||
* <p>
|
||||
* NOTE: This extends {@linkplain AbstractAggregationBuilder} for compatibility
|
||||
* with {@link SearchSourceBuilder#aggregation(AggregationBuilder)} but it
|
||||
* doesn't support any "server" side things like
|
||||
* {@linkplain Writeable#writeTo(StreamOutput)},
|
||||
* {@linkplain AggregationBuilder#rewrite(QueryRewriteContext)}, or
|
||||
* {@linkplain AbstractAggregationBuilder#build(QueryShardContext, AggregatorFactory)}.
|
||||
*/
|
||||
public class StringStatsAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.Bytes, StringStatsAggregationBuilder> {
|
||||
public static final String NAME = "string_stats";
|
||||
private static final ParseField SHOW_DISTRIBUTION_FIELD = new ParseField("show_distribution");
|
||||
|
||||
private boolean showDistribution = false;
|
||||
|
||||
public StringStatsAggregationBuilder(String name) {
|
||||
super(name, CoreValuesSourceType.BYTES, ValueType.STRING);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute the distribution of each character. Disabled by default.
|
||||
* @return this for chaining
|
||||
*/
|
||||
public StringStatsAggregationBuilder showDistribution(boolean showDistribution) {
|
||||
this.showDistribution = showDistribution;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getType() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
return builder.field(StringStatsAggregationBuilder.SHOW_DISTRIBUTION_FIELD.getPreferredName(), showDistribution);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void innerWriteTo(StreamOutput out) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ValuesSourceAggregatorFactory<Bytes> innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig<Bytes> config,
|
||||
AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map<String, Object> metaData) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(super.hashCode(), showDistribution);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || getClass() != obj.getClass()) {
|
||||
return false;
|
||||
}
|
||||
if (false == super.equals(obj)) {
|
||||
return false;
|
||||
}
|
||||
StringStatsAggregationBuilder other = (StringStatsAggregationBuilder) obj;
|
||||
return showDistribution == other.showDistribution;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.analytics;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.query.QueryRewriteContext;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.elasticsearch.search.sort.SortBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Builds the Top Metrics aggregation request.
|
||||
* <p>
|
||||
* NOTE: This extends {@linkplain AbstractAggregationBuilder} for compatibility
|
||||
* with {@link SearchSourceBuilder#aggregation(AggregationBuilder)} but it
|
||||
* doesn't support any "server" side things like
|
||||
* {@linkplain Writeable#writeTo(StreamOutput)},
|
||||
* {@linkplain AggregationBuilder#rewrite(QueryRewriteContext)}, or
|
||||
* {@linkplain AbstractAggregationBuilder#build(QueryShardContext, AggregatorFactory)}.
|
||||
*/
|
||||
public class TopMetricsAggregationBuilder extends AbstractAggregationBuilder<TopMetricsAggregationBuilder> {
|
||||
public static final String NAME = "top_metrics";
|
||||
|
||||
private final SortBuilder<?> sort;
|
||||
private final String metric;
|
||||
|
||||
/**
|
||||
* Build the request.
|
||||
* @param name the name of the metric
|
||||
* @param sort the sort key used to select the top metrics
|
||||
* @param metric the name of the field to select
|
||||
*/
|
||||
public TopMetricsAggregationBuilder(String name, SortBuilder<?> sort, String metric) {
|
||||
super(name);
|
||||
this.sort = sort;
|
||||
this.metric = metric;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getType() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
{
|
||||
builder.startArray("sort");
|
||||
sort.toXContent(builder, params);
|
||||
builder.endArray();
|
||||
builder.startObject("metric").field("field", metric).endObject();
|
||||
}
|
||||
return builder.endObject();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doWriteTo(StreamOutput out) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, Builder subfactoriesBuilder)
|
||||
throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map<String, Object> metaData) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.client;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonParseException;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.HttpResponse;
|
||||
|
@ -675,6 +676,11 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getDefaultNamedXContents();
|
||||
int expectedInternalAggregations = InternalAggregationTestCase.getDefaultNamedXContents().size();
|
||||
int expectedSuggestions = 3;
|
||||
|
||||
// Explicitly check for metrics from the analytics module because they aren't in InternalAggregationTestCase
|
||||
assertTrue(namedXContents.removeIf(e -> e.name.getPreferredName().equals("string_stats")));
|
||||
assertTrue(namedXContents.removeIf(e -> e.name.getPreferredName().equals("top_metrics")));
|
||||
|
||||
assertEquals(expectedInternalAggregations + expectedSuggestions, namedXContents.size());
|
||||
Map<Class<?>, Integer> categories = new HashMap<>();
|
||||
for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
|
||||
|
|
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.analytics;
|
||||
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.search.sort.FieldSortBuilder;
|
||||
import org.elasticsearch.search.sort.SortOrder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.hamcrest.Matchers.aMapWithSize;
|
||||
import static org.hamcrest.Matchers.closeTo;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasEntry;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
|
||||
public class AnalyticsAggsIT extends ESRestHighLevelClientTestCase {
|
||||
public void testStringStats() throws IOException {
|
||||
BulkRequest bulk = new BulkRequest("test").setRefreshPolicy(RefreshPolicy.IMMEDIATE);
|
||||
bulk.add(new IndexRequest().source(XContentType.JSON, "message", "trying out elasticsearch"));
|
||||
bulk.add(new IndexRequest().source(XContentType.JSON, "message", "more words"));
|
||||
highLevelClient().bulk(bulk, RequestOptions.DEFAULT);
|
||||
SearchRequest search = new SearchRequest("test");
|
||||
search.source().aggregation(new StringStatsAggregationBuilder("test").field("message.keyword").showDistribution(true));
|
||||
SearchResponse response = highLevelClient().search(search, RequestOptions.DEFAULT);
|
||||
ParsedStringStats stats = response.getAggregations().get("test");
|
||||
assertThat(stats.getCount(), equalTo(2L));
|
||||
assertThat(stats.getMinLength(), equalTo(10));
|
||||
assertThat(stats.getMaxLength(), equalTo(24));
|
||||
assertThat(stats.getAvgLength(), equalTo(17.0));
|
||||
assertThat(stats.getEntropy(), closeTo(4, .1));
|
||||
assertThat(stats.getDistribution(), aMapWithSize(18));
|
||||
assertThat(stats.getDistribution(), hasEntry(equalTo("o"), closeTo(.09, .005)));
|
||||
assertThat(stats.getDistribution(), hasEntry(equalTo("r"), closeTo(.12, .005)));
|
||||
assertThat(stats.getDistribution(), hasEntry(equalTo("t"), closeTo(.09, .005)));
|
||||
}
|
||||
|
||||
public void testBasic() throws IOException {
|
||||
BulkRequest bulk = new BulkRequest("test").setRefreshPolicy(RefreshPolicy.IMMEDIATE);
|
||||
bulk.add(new IndexRequest().source(XContentType.JSON, "s", 1, "v", 2));
|
||||
bulk.add(new IndexRequest().source(XContentType.JSON, "s", 2, "v", 3));
|
||||
highLevelClient().bulk(bulk, RequestOptions.DEFAULT);
|
||||
SearchRequest search = new SearchRequest("test");
|
||||
search.source().aggregation(new TopMetricsAggregationBuilder(
|
||||
"test", new FieldSortBuilder("s").order(SortOrder.DESC), "v"));
|
||||
SearchResponse response = highLevelClient().search(search, RequestOptions.DEFAULT);
|
||||
ParsedTopMetrics top = response.getAggregations().get("test");
|
||||
assertThat(top.getTopMetrics(), hasSize(1));
|
||||
ParsedTopMetrics.TopMetrics metric = top.getTopMetrics().get(0);
|
||||
assertThat(metric.getSort(), equalTo(singletonList(2)));
|
||||
assertThat(metric.getMetrics(), equalTo(singletonMap("v", 3.0)));
|
||||
}
|
||||
}
|
|
@ -67,6 +67,9 @@ ENV PATH /usr/share/elasticsearch/bin:\$PATH
|
|||
|
||||
COPY --chown=1000:0 bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
|
||||
|
||||
# Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks.
|
||||
RUN find / -xdev -perm -4000 -exec chmod ug-s {} +
|
||||
|
||||
# OpenShift overrides USER and runs with an arbitrary uid>1024 and gid=0
|
||||
# Allow ENTRYPOINT (and ES) to run even with a different user
|
||||
RUN chgrp 0 /usr/local/bin/docker-entrypoint.sh && \
|
||||
|
|
|
@ -25,7 +25,9 @@ This page lists all the available aggregations with their corresponding `Aggrega
|
|||
| {ref}/search-aggregations-metrics-stats-aggregation.html[Stats] | {agg-ref}/metrics/stats/StatsAggregationBuilder.html[StatsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#stats-java.lang.String-[AggregationBuilders.stats()]
|
||||
| {ref}/search-aggregations-metrics-sum-aggregation.html[Sum] | {agg-ref}/metrics/sum/SumAggregationBuilder.html[SumAggregationBuilder] | {agg-ref}/AggregationBuilders.html#sum-java.lang.String-[AggregationBuilders.sum()]
|
||||
| {ref}/search-aggregations-metrics-top-hits-aggregation.html[Top hits] | {agg-ref}/metrics/tophits/TopHitsAggregationBuilder.html[TopHitsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#topHits-java.lang.String-[AggregationBuilders.topHits()]
|
||||
| {ref}/search-aggregations-metrics-top-metrics.html[Top Metrics] | {javadoc-client}/analytics/TopMetricsAggregationBuilder.html[TopMetricsAggregationBuilder] | None
|
||||
| {ref}/search-aggregations-metrics-valuecount-aggregation.html[Value Count] | {agg-ref}/metrics/valuecount/ValueCountAggregationBuilder.html[ValueCountAggregationBuilder] | {agg-ref}/AggregationBuilders.html#count-java.lang.String-[AggregationBuilders.count()]
|
||||
| {ref}/search-aggregations-metrics-string-stats-aggregation.html[String Stats] | {javadoc-client}/analytics/StringStatsAggregationBuilder.html[StringStatsAggregationBuilder] | None
|
||||
|======
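The analytics rows above (Top Metrics and String Stats) point at client-side builders added in this change rather than `AggregationBuilders` factory methods. The following is an illustrative sketch, not part of the shipped documentation; it assumes an existing `RestHighLevelClient` named `client` and an index named `test` with a `message.keyword` field:

[source,java]
----
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.analytics.ParsedStringStats;
import org.elasticsearch.client.analytics.StringStatsAggregationBuilder;

import java.io.IOException;

public class StringStatsExample {
    // Runs a string_stats aggregation on message.keyword and reads the parsed result back.
    static void stringStats(RestHighLevelClient client) throws IOException {
        SearchRequest search = new SearchRequest("test");
        search.source().size(0).aggregation(
            new StringStatsAggregationBuilder("message_stats")
                .field("message.keyword")
                .showDistribution(true));
        SearchResponse response = client.search(search, RequestOptions.DEFAULT);
        ParsedStringStats stats = response.getAggregations().get("message_stats");
        long count = stats.getCount();           // number of non-empty values counted
        double avgLength = stats.getAvgLength(); // average term length
        double entropy = stats.getEntropy();     // Shannon entropy over all terms
    }
}
----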
|
||||
|
||||
==== Bucket Aggregations
|
||||
|
|
|
@ -41,10 +41,13 @@ include::metrics/sum-aggregation.asciidoc[]
|
|||
|
||||
include::metrics/tophits-aggregation.asciidoc[]
|
||||
|
||||
include::metrics/top-metrics-aggregation.asciidoc[]
|
||||
|
||||
include::metrics/valuecount-aggregation.asciidoc[]
|
||||
|
||||
include::metrics/median-absolute-deviation-aggregation.asciidoc[]
|
||||
|
||||
include::metrics/boxplot-aggregation.asciidoc[]
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -4,7 +4,8 @@
|
|||
=== Boxplot Aggregation
|
||||
|
||||
A `boxplot` metrics aggregation that computes boxplot of numeric values extracted from the aggregated documents.
|
||||
These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.
|
||||
These values can be generated by a provided script or extracted from specific numeric or
|
||||
<<histogram,histogram fields>> in the documents.
|
||||
|
||||
The `boxplot` aggregation returns essential information for making a https://en.wikipedia.org/wiki/Box_plot[box plot]: minimum, maximum,
|
||||
median, first quartile (25th percentile) and third quartile (75th percentile) values.
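For a concrete request shape, the following sketch (illustrative, not part of the original page) issues a `boxplot` aggregation through the Java low-level REST client. It assumes a `latency` index with a numeric `load_time` field and an existing `RestClient` named `restClient`:

[source,java]
----
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

public class BoxplotExample {
    // Requests min, max, median and the quartiles of load_time in a single aggregation.
    static String loadTimeBoxplot(RestClient restClient) throws IOException {
        Request request = new Request("POST", "/latency/_search");
        request.setJsonEntity(
            "{\"size\": 0, \"aggs\": {\"load_time_boxplot\": "
                + "{\"boxplot\": {\"field\": \"load_time\"}}}}");
        Response response = restClient.performRequest(request);
        return EntityUtils.toString(response.getEntity());
    }
}
----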
|
||||
|
|
|
@ -285,7 +285,7 @@ GET latency/_search
|
|||
|
||||
<1> Compression controls memory usage and approximation error
|
||||
|
||||
// tag::[t-digest]
|
||||
// tag::t-digest[]
|
||||
The TDigest algorithm uses a number of "nodes" to approximate percentiles -- the
|
||||
more nodes available, the higher the accuracy (and the larger the memory footprint), proportional
|
||||
to the volume of data. The `compression` parameter limits the maximum number of
|
||||
|
@ -301,7 +301,7 @@ A "node" uses roughly 32 bytes of memory, so under worst-case scenarios (large a
|
|||
of data which arrives sorted and in-order) the default settings will produce a
|
||||
TDigest roughly 64KB in size. In practice data tends to be more random and
|
||||
the TDigest will use less memory.
|
||||
// tag::[t-digest]
|
||||
// end::t-digest[]
|
||||
|
||||
==== HDR Histogram
|
||||
|
||||
|
|
|
@ -0,0 +1,284 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[search-aggregations-metrics-top-metrics]]
|
||||
=== Top Metrics Aggregation
|
||||
|
||||
experimental[We expect to change the response format of this aggregation as we add more features., https://github.com/elastic/elasticsearch/issues/51813]
|
||||
|
||||
The `top_metrics` aggregation selects metrics from the document with the largest or smallest "sort"
|
||||
value. For example, this gets the value of the `v` field on the document with the largest value of `s`:
|
||||
|
||||
[source,console,id=search-aggregations-metrics-top-metrics-simple]
|
||||
----
|
||||
POST /test/_bulk?refresh
|
||||
{"index": {}}
|
||||
{"s": 1, "v": 3.1415}
|
||||
{"index": {}}
|
||||
{"s": 2, "v": 1}
|
||||
{"index": {}}
|
||||
{"s": 3, "v": 2.71828}
|
||||
POST /test/_search?filter_path=aggregations
|
||||
{
|
||||
"aggs": {
|
||||
"tm": {
|
||||
"top_metrics": {
|
||||
"metric": {"field": "v"},
|
||||
"sort": {"s": "desc"}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
Which returns:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
{
|
||||
"aggregations": {
|
||||
"tm": {
|
||||
"top": [ {"sort": [3], "metrics": {"v": 2.718280076980591 } } ]
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE
|
||||
|
||||
`top_metrics` is fairly similar to <<search-aggregations-metrics-top-hits-aggregation, `top_hits`>>
|
||||
in spirit but because it is more limited it is able to do its job using less memory and is often
|
||||
faster.
|
||||
|
||||
==== `sort`
|
||||
|
||||
The `sort` field in the metric request functions exactly the same as the `sort` field in the
|
||||
<<request-body-search-sort, search>> request except:
|
||||
* It can't be used on <<binary,binary>>, <<flattened,flattened>>, <<ip,ip>>,
|
||||
<<keyword,keyword>>, or <<text,text>> fields.
|
||||
* It only supports a single sort value.
|
||||
|
||||
The metrics that the aggregation returns come from the first hit that would be returned by the search
|
||||
request. So,
|
||||
|
||||
`"sort": {"s": "desc"}`:: gets metrics from the document with the highest `s`
|
||||
`"sort": {"s": "asc"}`:: gets the metrics from the document with the lowest `s`
|
||||
`"sort": {"_geo_distance": {"location": "35.7796, -78.6382"}}`::
|
||||
gets metrics from the documents with `location` *closest* to `35.7796, -78.6382`
|
||||
`"sort": "_score"`:: gets metrics from the document with the highest score
|
||||
|
||||
NOTE: This aggregation doesn't support any sort of "tie breaking". If two documents have
|
||||
the same sort values then this aggregation could return either document's fields.
|
||||
|
||||
==== `metric`
|
||||
|
||||
At this point `metric` supports only `{"field": "field_name"}` and all metrics
|
||||
are returned as double precision floating point numbers. Expect more to
|
||||
come here.
|
||||
|
||||
==== Examples
|
||||
|
||||
===== Use with terms
|
||||
|
||||
This aggregation should be quite useful inside of <<search-aggregations-bucket-terms-aggregation, `terms`>>
|
||||
aggregation, to, say, find the last value reported by each server.
|
||||
|
||||
[source,console,id=search-aggregations-metrics-top-metrics-terms]
|
||||
----
|
||||
PUT /node
|
||||
{
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"ip": {"type": "ip"},
|
||||
"date": {"type": "date"}
|
||||
}
|
||||
}
|
||||
}
|
||||
POST /node/_bulk?refresh
|
||||
{"index": {}}
|
||||
{"ip": "192.168.0.1", "date": "2020-01-01T01:01:01", "v": 1}
|
||||
{"index": {}}
|
||||
{"ip": "192.168.0.1", "date": "2020-01-01T02:01:01", "v": 2}
|
||||
{"index": {}}
|
||||
{"ip": "192.168.0.2", "date": "2020-01-01T02:01:01", "v": 3}
|
||||
POST /node/_search?filter_path=aggregations
|
||||
{
|
||||
"aggs": {
|
||||
"ip": {
|
||||
"terms": {
|
||||
"field": "ip"
|
||||
},
|
||||
"aggs": {
|
||||
"tm": {
|
||||
"top_metrics": {
|
||||
"metric": {"field": "v"},
|
||||
"sort": {"date": "desc"}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
Which returns:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
{
|
||||
"aggregations": {
|
||||
"ip": {
|
||||
"buckets": [
|
||||
{
|
||||
"key": "192.168.0.1",
|
||||
"doc_count": 2,
|
||||
"tm": {
|
||||
"top": [ {"sort": ["2020-01-01T02:01:01.000Z"], "metrics": {"v": 2.0 } } ]
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "192.168.0.2",
|
||||
"doc_count": 1,
|
||||
"tm": {
|
||||
"top": [ {"sort": ["2020-01-01T02:01:01.000Z"], "metrics": {"v": 3.0 } } ]
|
||||
}
|
||||
}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE
|
||||
|
||||
Unlike `top_hits`, you can sort buckets by the results of this metric:
|
||||
|
||||
[source,console]
|
||||
----
|
||||
POST /node/_search?filter_path=aggregations
|
||||
{
|
||||
"aggs": {
|
||||
"ip": {
|
||||
"terms": {
|
||||
"field": "ip",
|
||||
"order": {"tm.v": "desc"}
|
||||
},
|
||||
"aggs": {
|
||||
"tm": {
|
||||
"top_metrics": {
|
||||
"metric": {"field": "v"},
|
||||
"sort": {"date": "desc"}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TEST[continued]
|
||||
|
||||
Which returns:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
{
|
||||
"aggregations": {
|
||||
"ip": {
|
||||
"buckets": [
|
||||
{
|
||||
"key": "192.168.0.2",
|
||||
"doc_count": 1,
|
||||
"tm": {
|
||||
"top": [ {"sort": ["2020-01-01T02:01:01.000Z"], "metrics": {"v": 3.0 } } ]
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "192.168.0.1",
|
||||
"doc_count": 2,
|
||||
"tm": {
|
||||
"top": [ {"sort": ["2020-01-01T02:01:01.000Z"], "metrics": {"v": 2.0 } } ]
|
||||
}
|
||||
}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE
|
||||
|
||||
===== Mixed sort types
|
||||
|
||||
Sorting `top_metrics` by a field that has different types across different
|
||||
indices produces somewhat surprising results: floating point fields are
|
||||
always sorted independently of whole numbered fields.
|
||||
|
||||
[source,console,id=search-aggregations-metrics-top-metrics-mixed-sort]
|
||||
----
|
||||
POST /test/_bulk?refresh
|
||||
{"index": {"_index": "test1"}}
|
||||
{"s": 1, "v": 3.1415}
|
||||
{"index": {"_index": "test1"}}
|
||||
{"s": 2, "v": 1}
|
||||
{"index": {"_index": "test2"}}
|
||||
{"s": 3.1, "v": 2.71828}
|
||||
POST /test*/_search?filter_path=aggregations
|
||||
{
|
||||
"aggs": {
|
||||
"tm": {
|
||||
"top_metrics": {
|
||||
"metric": {"field": "v"},
|
||||
"sort": {"s": "asc"}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
Which returns:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
{
|
||||
"aggregations": {
|
||||
"tm": {
|
||||
"top": [ {"sort": [3.0999999046325684], "metrics": {"v": 2.718280076980591 } } ]
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE
|
||||
|
||||
While this is better than an error, it *probably* isn't what you were going for.
|
||||
While it does lose some precision, you can explicitly cast the whole number
|
||||
fields to floating points with something like:
|
||||
|
||||
[source,console]
|
||||
----
|
||||
POST /test*/_search?filter_path=aggregations
|
||||
{
|
||||
"aggs": {
|
||||
"tm": {
|
||||
"top_metrics": {
|
||||
"metric": {"field": "v"},
|
||||
"sort": {"s": {"order": "asc", "numeric_type": "double"}}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TEST[continued]
|
||||
|
||||
Which returns the much more expected:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
{
|
||||
"aggregations": {
|
||||
"tm": {
|
||||
"top": [ {"sort": [1.0], "metrics": {"v": 3.1414999961853027 } } ]
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE
|
|
@ -20,7 +20,7 @@ the process.
|
|||
* <<test-analyzer>>
|
||||
* <<configuring-analyzers>>
|
||||
* <<analysis-custom-analyzer>>
|
||||
* <specify-analyer>>
|
||||
* <<specify-analyzer>>
|
||||
|
||||
|
||||
include::testing.asciidoc[]
|
||||
|
|
|
@ -30,7 +30,11 @@ Consider using EQL if you:
|
|||
=== In this section
|
||||
|
||||
* <<eql-requirements>>
|
||||
* <<eql-search>>
|
||||
* <<eql-syntax>>
|
||||
* <<eql-limitations>>
|
||||
|
||||
include::requirements.asciidoc[]
|
||||
include::search.asciidoc[]
|
||||
include::syntax.asciidoc[]
|
||||
include::limitations.asciidoc[]
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[eql-limitations]]
|
||||
== EQL limitations
|
||||
++++
|
||||
<titleabbrev>Limitations</titleabbrev>
|
||||
++++
|
||||
|
||||
experimental::[]
|
||||
|
||||
[discrete]
|
||||
[[eql-unsupported-syntax]]
|
||||
=== Unsupported syntax
|
||||
|
||||
{es} supports a subset of {eql-ref}/index.html[EQL syntax]. {es} cannot run EQL
|
||||
queries that contain:
|
||||
|
||||
* {eql-ref}/functions.html[Functions]
|
||||
|
||||
* {eql-ref}/joins.html[Joins]
|
||||
|
||||
* {eql-ref}/basic-syntax.html#event-relationships[Lineage-related keywords]:
|
||||
** `child of`
|
||||
** `descendant of`
|
||||
** `event of`
|
||||
|
||||
* {eql-ref}/pipes.html[Pipes]
|
||||
|
||||
* {eql-ref}/sequences.html[Sequences]
|
|
@ -6,6 +6,8 @@
|
|||
<titleabbrev>Requirements</titleabbrev>
|
||||
++++
|
||||
|
||||
experimental::[]
|
||||
|
||||
EQL is schemaless and works out-of-the-box with most common log formats. If you
|
||||
use a standard log format and already know what fields in your index contain
|
||||
event type and timestamp information, you can skip this page.
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[eql-search]]
|
||||
== Run an EQL search
|
||||
|
||||
experimental::[]
|
||||
|
||||
To start using EQL in {es}, first ensure your event data meets
|
||||
<<eql-requirements,EQL requirements>>. Then ingest or add the data to an {es}
|
||||
index.
|
||||
|
||||
The following <<docs-bulk,bulk API>> request adds some example log data to the
|
||||
`sec_logs` index. This log data follows the {ecs-ref}[Elastic Common Schema
|
||||
(ECS)].
|
||||
|
||||
[source,console]
|
||||
----
|
||||
PUT sec_logs/_bulk?refresh
|
||||
{"index":{"_index" : "sec_logs"}}
|
||||
{ "@timestamp": "2020-12-07T11:06:07.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "process" }, "process": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" } }
|
||||
{"index":{"_index" : "sec_logs"}}
|
||||
{ "@timestamp": "2020-12-07T11:07:08.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "image_load" }, "file": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" }, "process": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" } }
|
||||
{"index":{"_index" : "sec_logs"}}
|
||||
{ "@timestamp": "2020-12-07T11:07:09.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "process" }, "process": { "name": "regsvr32.exe", "path": "C:\\Windows\\System32\\regsvr32.exe" } }
|
||||
----
|
||||
|
||||
You can now use the EQL search API to search this index using an EQL query.
|
||||
|
||||
The following request searches the `sec_logs` index using the EQL query
|
||||
specified in the `rule` parameter. The EQL query matches events with an
|
||||
`event.category` of `process` that have a `process.name` of `cmd.exe`.
|
||||
|
||||
[source,console]
|
||||
----
|
||||
GET sec_logs/_eql/search
|
||||
{
|
||||
"rule": """
|
||||
process where process.name == "cmd.exe"
|
||||
"""
|
||||
}
|
||||
----
|
||||
// TEST[continued]
|
||||
|
||||
Because the `sec_logs` index follows the ECS, you don't need to specify the
|
||||
event type or timestamp fields. The request uses the `event.category` and
|
||||
`@timestamp` fields by default.
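For Java applications, the same request can be issued through the low-level REST client. This is an illustrative sketch rather than part of the original page; it assumes no dedicated EQL client helper is available and that an existing low-level `RestClient` named `restClient` is in scope:

[source,java]
----
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

public class EqlSearchExample {
    // Sends the EQL query from the example above and returns the raw JSON response.
    static String eqlSearch(RestClient restClient) throws IOException {
        Request request = new Request("GET", "/sec_logs/_eql/search");
        request.setJsonEntity(
            "{\"rule\": \"process where process.name == \\\"cmd.exe\\\"\"}");
        Response response = restClient.performRequest(request);
        return EntityUtils.toString(response.getEntity());
    }
}
----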
|
|
@ -135,7 +135,6 @@ for the alias's indexing operations.
|
|||
See <<aliases-routing>> for an example.
|
||||
|
||||
`search_routing`::
|
||||
`index_routing`::
|
||||
(Optional, string)
|
||||
Custom <<mapping-routing-field, routing value>> used
|
||||
for the alias's search operations.
|
||||
|
|
|
@ -63,7 +63,7 @@ GET my_index/_search
|
|||
"my_field" : {
|
||||
"script" : {
|
||||
"lang" : "painless",
|
||||
"source" : "doc['date'].date.nanos" <6>
|
||||
"source" : "doc['date'].value.nano" <6>
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -252,6 +252,10 @@ between index size and a reasonable level of precision of 50m at the
|
|||
equator. This allows for indexing tens of millions of shapes without
|
||||
overly bloating the resulting index too much relative to the input size.
|
||||
|
||||
[NOTE]
|
||||
Geo-shape queries on geo-shapes implemented with PrefixTrees will not be executed if
|
||||
<<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>> is set to false.
|
||||
|
||||
[[input-structure]]
|
||||
[float]
|
||||
==== Input Structure
|
||||
|
|
|
@ -37,6 +37,7 @@ following aggregations and queries:
|
|||
|
||||
* <<search-aggregations-metrics-percentile-aggregation,percentiles>> aggregation
|
||||
* <<search-aggregations-metrics-percentile-rank-aggregation,percentile ranks>> aggregation
|
||||
* <<search-aggregations-metrics-boxplot-aggregation,boxplot>> aggregation
|
||||
* <<query-dsl-exists-query,exists>> query
|
||||
|
||||
[[mapping-types-histogram-building-histogram]]
|
||||
|
|
|
@ -642,23 +642,9 @@ to `false`. When `true`, only a single model must match the ID patterns
|
|||
provided, otherwise a bad request is returned.
|
||||
end::include-model-definition[]
|
||||
|
||||
tag::tags[]
|
||||
A comma delimited string of tags. A {infer} model can have many tags, or none.
|
||||
When supplied, only {infer} models that contain all the supplied tags are
|
||||
returned.
|
||||
end::tags[]
|
||||
|
||||
tag::indices[]
|
||||
An array of index names. Wildcards are supported. For example:
|
||||
`["it_ops_metrics", "server*"]`.
|
||||
|
||||
tag::num-top-feature-importance-values[]
|
||||
Advanced configuration option. If set, feature importance for the top
|
||||
most important features will be computed. Importance is calculated
|
||||
using the SHAP (SHapley Additive exPlanations) method as described in
|
||||
https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf[Lundberg, S. M., & Lee, S.-I. A Unified Approach to Interpreting Model Predictions. In NeurIPS 2017.].
|
||||
end::num-top-feature-importance-values[]
|
||||
|
||||
+
|
||||
--
|
||||
NOTE: If any indices are in remote clusters then `cluster.remote.connect` must
|
||||
|
@ -918,6 +904,13 @@ total number of categories (in the {version} version of the {stack}, it's two)
|
|||
to predict then we will report all category probabilities. Defaults to 2.
|
||||
end::num-top-classes[]
|
||||
|
||||
tag::num-top-feature-importance-values[]
|
||||
Advanced configuration option. If set, feature importance for the top
|
||||
most important features will be computed. Importance is calculated
|
||||
using the SHAP (SHapley Additive exPlanations) method as described in
|
||||
https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf[Lundberg, S. M., & Lee, S.-I. A Unified Approach to Interpreting Model Predictions. In NeurIPS 2017.].
|
||||
end::num-top-feature-importance-values[]
|
||||
|
||||
tag::over-field-name[]
|
||||
The field used to split the data. In particular, this property is used for
|
||||
analyzing the splits with respect to the history of all splits. It is used for
|
||||
|
@ -1062,6 +1055,12 @@ function.
|
|||
--
|
||||
end::summary-count-field-name[]
|
||||
|
||||
tag::tags[]
|
||||
A comma delimited string of tags. A {infer} model can have many tags, or none.
|
||||
When supplied, only {infer} models that contain all the supplied tags are
|
||||
returned.
|
||||
end::tags[]
|
||||
|
||||
tag::time-format[]
|
||||
The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The
|
||||
default value is `epoch`, which refers to UNIX or Epoch time (the number of
|
||||
|
|
|
@ -25,6 +25,27 @@ or to alter their behaviour (such as the
|
|||
|
||||
Query clauses behave differently depending on whether they are used in
|
||||
<<query-filter-context,query context or filter context>>.
|
||||
|
||||
[[query-dsl-allow-expensive-queries]]
|
||||
Allow expensive queries::
|
||||
Certain types of queries will generally execute slowly due to the way they are implemented, which can affect
|
||||
the stability of the cluster. Those queries can be categorised as follows:
|
||||
* Queries that need to do linear scans to identify matches:
|
||||
** <<query-dsl-script-query, `script queries`>>
|
||||
* Queries that have a high up-front cost:
|
||||
** <<query-dsl-fuzzy-query,`fuzzy queries`>>
|
||||
** <<query-dsl-regexp-query,`regexp queries`>>
|
||||
** <<query-dsl-prefix-query,`prefix queries`>> without <<index-prefixes, `index_prefixes`>>
|
||||
** <<query-dsl-wildcard-query, `wildcard queries`>>
|
||||
** <<query-dsl-range-query, `range queries`>> on <<text, `text`>> and <<keyword, `keyword`>> fields
|
||||
* <<joining-queries, `Joining queries`>>
|
||||
* Queries on <<prefix-trees, deprecated geo shapes>>
|
||||
* Queries that may have a high per-document cost:
|
||||
** <<query-dsl-script-score-query, `script score queries`>>
|
||||
** <<query-dsl-percolate-query, `percolate queries`>>
|
||||
|
||||
The execution of such queries can be prevented by setting the value of the `search.allow_expensive_queries`
|
||||
setting to `false` (defaults to `true`), as shown in the sketch below.
|
||||
--
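This setting can also be changed on a running cluster through the cluster update settings API. The snippet below is an illustrative sketch rather than part of the original page; it assumes an existing `RestHighLevelClient` named `client` and that `search.allow_expensive_queries` is registered as a dynamic cluster setting:

[source,java]
----
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;

public class ExpensiveQueriesSetting {
    // Persistently disables expensive queries such as script, fuzzy, regexp and wildcard queries.
    static void disableExpensiveQueries(RestHighLevelClient client) throws IOException {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        request.persistentSettings(Settings.builder()
            .put("search.allow_expensive_queries", false));
        client.cluster().putSettings(request, RequestOptions.DEFAULT);
    }
}
----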
|
||||
|
||||
include::query-dsl/query_filter_context.asciidoc[]
|
||||
|
@ -51,4 +72,4 @@ include::query-dsl/minimum-should-match.asciidoc[]
|
|||
|
||||
include::query-dsl/multi-term-rewrite.asciidoc[]
|
||||
|
||||
include::query-dsl/regexp-syntax.asciidoc[]
|
||||
include::query-dsl/regexp-syntax.asciidoc[]
|
||||
|
|
|
@ -97,4 +97,8 @@ adjacent characters (ab → ba). Defaults to `true`.
|
|||
|
||||
`rewrite`::
|
||||
(Optional, string) Method used to rewrite the query. For valid values and more
|
||||
information, see the <<query-dsl-multi-term-rewrite, `rewrite` parameter>>.
|
||||
information, see the <<query-dsl-multi-term-rewrite, `rewrite` parameter>>.
|
||||
|
||||
==== Notes
|
||||
Fuzzy queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
|
||||
is set to false.
|
||||
|
|
|
@ -161,3 +161,7 @@ and will not match any documents for this query. This can be useful when
|
|||
querying multiple indexes which might have different mappings. When set to
|
||||
`false` (the default value) the query will throw an exception if the field
|
||||
is not mapped.
|
||||
|
||||
==== Notes
|
||||
Geo-shape queries on geo-shapes implemented with <<prefix-trees, `PrefixTrees`>> will not be executed if
|
||||
<<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>> is set to false.
|
||||
|
|
|
@ -29,4 +29,7 @@ include::has-parent-query.asciidoc[]
|
|||
|
||||
include::parent-id-query.asciidoc[]
|
||||
|
||||
|
||||
=== Notes
|
||||
==== Allow expensive queries
|
||||
Joining queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
|
||||
is set to false.
|
||||
|
|
|
@ -693,3 +693,8 @@ being percolated, as opposed to a single index as we do in examples. There are a
|
|||
allows for fields to be stored in a denser, more efficient way.
|
||||
- Percolate queries do not scale in the same way as other queries, so percolation performance may benefit from using
|
||||
a different index configuration, like the number of primary shards.
|
||||
|
||||
=== Notes
|
||||
==== Allow expensive queries
|
||||
Percolate queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
|
||||
is set to false.
|
||||
|
|
|
@ -64,4 +64,10 @@ GET /_search
|
|||
You can speed up prefix queries using the <<index-prefixes,`index_prefixes`>>
|
||||
mapping parameter. If enabled, {es} indexes prefixes between 2 and 5
|
||||
characters in a separate field. This lets {es} run prefix queries more
|
||||
efficiently at the cost of a larger index.
|
||||
efficiently at the cost of a larger index.
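Enabling `index_prefixes` is a mapping-time decision. The sketch below is illustrative rather than part of the original page; it assumes an existing `RestHighLevelClient` named `client` and uses a hypothetical index and field name:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

public class PrefixIndexExample {
    // Creates an index whose "full_name" field also indexes 2-5 character prefixes,
    // which keeps prefix queries cheap.
    static void createIndexWithPrefixes(RestHighLevelClient client) throws IOException {
        CreateIndexRequest create = new CreateIndexRequest("my-index");
        create.mapping(
            "{\"properties\": {\"full_name\": {\"type\": \"text\", "
                + "\"index_prefixes\": {\"min_chars\": 2, \"max_chars\": 5}}}}",
            XContentType.JSON);
        client.indices().create(create, RequestOptions.DEFAULT);
    }
}
----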
|
||||
|
||||
[[prefix-query-allow-expensive-queries]]
|
||||
===== Allow expensive queries
|
||||
Prefix queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
|
||||
is set to false. However, if <<index-prefixes, `index_prefixes`>> are enabled, an optimised query is built which
|
||||
is not considered slow, and will be executed in spite of this setting.
|
||||
|
|
|
@ -537,3 +537,9 @@ The example above creates a boolean query:
|
|||
`(blended(terms:[field2:this, field1:this]) blended(terms:[field2:that, field1:that]) blended(terms:[field2:thus, field1:thus]))~2`
|
||||
|
||||
that matches documents with at least two of the three per-term blended queries.
|
||||
|
||||
==== Notes
|
||||
===== Allow expensive queries
|
||||
Query string query can internally be transformed to a <<query-dsl-prefix-query, `prefix query`>>, which means
|
||||
that if the prefix queries are disabled as explained <<prefix-query-allow-expensive-queries, here>> the query will not be
|
||||
executed and an exception will be thrown.
|
||||
|
|
|
@ -134,6 +134,11 @@ increases the relevance score.
|
|||
[[range-query-notes]]
|
||||
==== Notes
|
||||
|
||||
[[ranges-on-text-and-keyword]]
|
||||
===== Using the `range` query with `text` and `keyword` fields
|
||||
Range queries on <<text, `text`>> or <<keyword, `keyword`>> fields will not be executed if
|
||||
<<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>> is set to false.
|
||||
|
||||
[[ranges-on-dates]]
|
||||
===== Using the `range` query with `date` fields
|
||||
|
||||
|
|
|
@@ -86,3 +86,8 @@ regular expressions.
`rewrite`::
(Optional, string) Method used to rewrite the query. For valid values and more
information, see the <<query-dsl-multi-term-rewrite, `rewrite` parameter>>.

==== Notes
===== Allow expensive queries
Regexp queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
is set to false.

@@ -69,3 +69,7 @@ GET /_search
}
}
----

===== Allow expensive queries
Script queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
is set to false.

@@ -221,6 +221,10 @@ and default time zone. Also calculations with `now` are not supported.
<<vector-functions, Functions for vector fields>> are accessible through
`script_score` query.

===== Allow expensive queries
Script score queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
is set to false.

[[script-score-faster-alt]]
===== Faster alternatives
The `script_score` query calculates the score for

@@ -67,4 +67,9 @@ increases the relevance score.

`rewrite`::
(Optional, string) Method used to rewrite the query. For valid values and more information, see the
<<query-dsl-multi-term-rewrite, `rewrite` parameter>>.

==== Notes
===== Allow expensive queries
Wildcard queries will not be executed if <<query-dsl-allow-expensive-queries, `search.allow_expensive_queries`>>
is set to false.

@@ -330,6 +330,11 @@ See <<slm-api-delete-policy>>.

See <<slm-api-execute-lifecycle>>.

[role="exclude",id="slm-api-execute-policy"]
=== {slm-init} execute lifecycle API

See <<slm-api-execute-lifecycle>>.

[role="exclude",id="slm-api-get"]
=== {slm-init} get policy API

@@ -340,7 +345,22 @@ See <<slm-api-get-policy>>.

See <<slm-api-get-stats>>.

[role="exclude",id="slm-get-status"]
=== {slm-init} status API

See <<slm-api-get-status>>.

[role="exclude",id="slm-api-put"]
=== {slm-init} put policy API

See <<slm-api-put-policy>>.

[role="exclude",id="slm-start"]
=== Start {slm} API

See <<slm-api-start>>.

[role="exclude",id="slm-stop"]
=== Stop {slm} API

See <<slm-api-stop>>.

@@ -30,3 +30,43 @@ This lets {es} skip non-competitive hits, which often improves query speed.
In benchmarking tests, this sped up sorts on `long` fields by 10x.

// end::notable-highlights[]

// tag::notable-highlights[]
[float]
==== Simplifying and operationalizing machine learning

With the release of 7.6 the {stack} delivers an end-to-end {ml} pipeline
providing the path from raw data to building, testing, and deploying {ml} models
in production. Up to this point {ml} in the {stack} had primarily focused on
unsupervised techniques by using sophisticated pattern recognition that builds
time series models used for {anomaly-detect}. With the new {dfanalytics}, you
can now use labelled data to train and test your own models, store those models
as {es} indices, and use {ml-docs}/ml-inference.html[inference] to add predicted
values to the indices based on your trained models.

One packaged model that we are releasing in 7.6 is
{ml-docs}/ml-lang-ident.html[{lang-ident}]. If you have documents or sources
that come in a variety of languages, {lang-ident} can be used to determine the
language of text so you can improve the overall search relevance.
{lang-ident-cap} is a trained model that can provide a prediction of the
language of any text field.
// end::notable-highlights[]

// tag::notable-highlights[]
[float]
==== {ccs-cap} in {transforms}

{ref}/transforms.html[{transforms-cap}] can now use {ccs} (CCS) for the source
index. Now you can have separate clusters (for example, project clusters) build
entity-centric or feature indices against a primary cluster.

// end::notable-highlights[]

[float]
=== Learn more

Get more details on these features in the
https://www.elastic.co/blog/elasticsearch-7-6-0-released[{es} 7.6 release blog].
For a complete list of enhancements and other changes, check out the
<<release-notes-7.6.0,{es} 7.6 release notes>>.

@@ -118,6 +118,17 @@ SELECT * FROM test GROUP BY age ORDER BY COUNT(*) LIMIT 100;
It is possible to run the same queries without a `LIMIT`, however in that case if the maximum size (*10000*) is passed,
an exception will be returned as {es-sql} is unable to track (and sort) all the results returned.

Moreover, the aggregation(s) used in the `ORDER BY` must be only plain aggregate functions. No scalar
functions or operators can be used, and therefore no complex columns that combine two or more aggregate
functions can be used for ordering. Here are some examples of queries that are *not allowed*:

[source, sql]
--------------------------------------------------
SELECT age, ROUND(AVG(salary)) AS avg FROM test GROUP BY age ORDER BY avg;

SELECT age, MAX(salary) - MIN(salary) AS diff FROM test GROUP BY age ORDER BY diff;
--------------------------------------------------

[float]
=== Using aggregation functions on top of scalar functions

@@ -148,6 +148,15 @@ public abstract class AbstractObjectParser<Value, Context>
        declareField(consumer, (p, c) -> objectParser.parse(p, c), field, ValueType.OBJECT);
    }

    /**
     * Declare an object field that parses explicit {@code null}s in the json to a default value.
     */
    public <T> void declareObjectOrNull(BiConsumer<Value, T> consumer, ContextParser<Context, T> objectParser, T nullValue,
            ParseField field) {
        declareField(consumer, (p, c) -> p.currentToken() == XContentParser.Token.VALUE_NULL ? nullValue : objectParser.parse(p, c),
            field, ValueType.OBJECT_OR_NULL);
    }

    public void declareFloat(BiConsumer<Value, Float> consumer, ParseField field) {
        // Using a method reference here angers some compilers
        declareField(consumer, p -> p.floatValue(), field, ValueType.FLOAT);

@@ -158,16 +167,33 @@ public abstract class AbstractObjectParser<Value, Context>
        declareField(consumer, p -> p.doubleValue(), field, ValueType.DOUBLE);
    }

    /**
     * Declare a double field that parses explicit {@code null}s in the json to a default value.
     */
    public void declareDoubleOrNull(BiConsumer<Value, Double> consumer, double nullValue, ParseField field) {
        declareField(consumer, p -> p.currentToken() == XContentParser.Token.VALUE_NULL ? nullValue : p.doubleValue(),
            field, ValueType.DOUBLE_OR_NULL);
    }

    public void declareLong(BiConsumer<Value, Long> consumer, ParseField field) {
        // Using a method reference here angers some compilers
        declareField(consumer, p -> p.longValue(), field, ValueType.LONG);
    }

    public void declareInt(BiConsumer<Value, Integer> consumer, ParseField field) {
        // Using a method reference here angers some compilers
        declareField(consumer, p -> p.intValue(), field, ValueType.INT);
    }

    /**
     * Declare an int field that parses explicit {@code null}s in the json to a default value.
     */
    public void declareIntOrNull(BiConsumer<Value, Integer> consumer, int nullValue, ParseField field) {
        declareField(consumer, p -> p.currentToken() == XContentParser.Token.VALUE_NULL ? nullValue : p.intValue(),
            field, ValueType.INT_OR_NULL);
    }

    public void declareString(BiConsumer<Value, String> consumer, ParseField field) {
        declareField(consumer, XContentParser::text, field, ValueType.STRING);
    }

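A hedged usage sketch of the new declare*OrNull methods (the Point class and field names are
hypothetical; only the ObjectParser calls come from the change above):

[source,java]
----
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;

public class PointParserExample {
    // Hypothetical target object used only to show the wiring.
    static class Point {
        int x;
        double weight;
        void setX(int x) { this.x = x; }
        void setWeight(double weight) { this.weight = weight; }
    }

    static final ObjectParser<Point, Void> PARSER = new ObjectParser<>("point", Point::new);
    static {
        // An explicit JSON null for "x" is mapped to -1 instead of being rejected.
        PARSER.declareIntOrNull(Point::setX, -1, new ParseField("x"));
        // An explicit JSON null for "weight" becomes NaN.
        PARSER.declareDoubleOrNull(Point::setWeight, Double.NaN, new ParseField("weight"));
    }
}
----
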
@ -43,6 +43,7 @@ import static org.hamcrest.Matchers.containsString;
|
|||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
public class ObjectParserTests extends ESTestCase {
|
||||
|
@ -275,6 +276,24 @@ public class ObjectParserTests extends ESTestCase {
|
|||
assertNotNull(s.object);
|
||||
}
|
||||
|
||||
public void testObjectOrNullWhenNull() throws IOException {
|
||||
StaticTestStruct nullMarker = new StaticTestStruct();
|
||||
XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"object\" : null}");
|
||||
ObjectParser<StaticTestStruct, Void> objectParser = new ObjectParser<>("foo", StaticTestStruct::new);
|
||||
objectParser.declareObjectOrNull(StaticTestStruct::setObject, objectParser, nullMarker, new ParseField("object"));
|
||||
StaticTestStruct s = objectParser.parse(parser, null);
|
||||
assertThat(s.object, equalTo(nullMarker));
|
||||
}
|
||||
|
||||
public void testObjectOrNullWhenNonNull() throws IOException {
|
||||
StaticTestStruct nullMarker = new StaticTestStruct();
|
||||
XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"object\" : {}}");
|
||||
ObjectParser<StaticTestStruct, Void> objectParser = new ObjectParser<>("foo", StaticTestStruct::new);
|
||||
objectParser.declareObjectOrNull(StaticTestStruct::setObject, objectParser, nullMarker, new ParseField("object"));
|
||||
StaticTestStruct s = objectParser.parse(parser, null);
|
||||
assertThat(s.object, not(nullValue()));
|
||||
}
|
||||
|
||||
public void testEmptyObjectInArray() throws IOException {
|
||||
XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"object_array\" : [{}]}");
|
||||
ObjectParser<StaticTestStruct, Void> objectParser = new ObjectParser<>("foo", StaticTestStruct::new);
|
||||
|
@ -321,15 +340,32 @@ public class ObjectParserTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testAllVariants() throws IOException {
|
||||
double expectedNullableDouble;
|
||||
int expectedNullableInt;
|
||||
|
||||
XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
|
||||
builder.startObject();
|
||||
builder.field("int_field", randomBoolean() ? "1" : 1);
|
||||
if (randomBoolean()) {
|
||||
builder.nullField("nullable_int_field");
|
||||
expectedNullableInt = -1;
|
||||
} else {
|
||||
expectedNullableInt = randomInt();
|
||||
builder.field("nullable_int_field", expectedNullableInt);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.array("int_array_field", randomBoolean() ? "1" : 1);
|
||||
} else {
|
||||
builder.field("int_array_field", randomBoolean() ? "1" : 1);
|
||||
}
|
||||
builder.field("double_field", randomBoolean() ? "2.1" : 2.1d);
|
||||
if (randomBoolean()) {
|
||||
builder.nullField("nullable_double_field");
|
||||
expectedNullableDouble = Double.NaN;
|
||||
} else {
|
||||
expectedNullableDouble = randomDouble();
|
||||
builder.field("nullable_double_field", expectedNullableDouble);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.array("double_array_field", randomBoolean() ? "2.1" : 2.1d);
|
||||
} else {
|
||||
|
@ -364,9 +400,11 @@ public class ObjectParserTests extends ESTestCase {
|
|||
XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder));
|
||||
class TestStruct {
|
||||
int int_field;
|
||||
int nullableIntField;
|
||||
long long_field;
|
||||
float float_field;
|
||||
double double_field;
|
||||
double nullableDoubleField;
|
||||
String string_field;
|
||||
List<Integer> int_array_field;
|
||||
List<Long> long_array_field;
|
||||
|
@ -378,6 +416,9 @@ public class ObjectParserTests extends ESTestCase {
|
|||
public void setInt_field(int int_field) {
|
||||
this.int_field = int_field;
|
||||
}
|
||||
public void setNullableIntField(int nullableIntField) {
|
||||
this.nullableIntField = nullableIntField;
|
||||
}
|
||||
public void setLong_field(long long_field) {
|
||||
this.long_field = long_field;
|
||||
}
|
||||
|
@ -387,6 +428,9 @@ public class ObjectParserTests extends ESTestCase {
|
|||
public void setDouble_field(double double_field) {
|
||||
this.double_field = double_field;
|
||||
}
|
||||
public void setNullableDoubleField(double nullableDoubleField) {
|
||||
this.nullableDoubleField = nullableDoubleField;
|
||||
}
|
||||
public void setString_field(String string_field) {
|
||||
this.string_field = string_field;
|
||||
}
|
||||
|
@ -416,10 +460,12 @@ public class ObjectParserTests extends ESTestCase {
|
|||
}
|
||||
ObjectParser<TestStruct, Void> objectParser = new ObjectParser<>("foo");
|
||||
objectParser.declareInt(TestStruct::setInt_field, new ParseField("int_field"));
|
||||
objectParser.declareIntOrNull(TestStruct::setNullableIntField, -1, new ParseField("nullable_int_field"));
|
||||
objectParser.declareIntArray(TestStruct::setInt_array_field, new ParseField("int_array_field"));
|
||||
objectParser.declareLong(TestStruct::setLong_field, new ParseField("long_field"));
|
||||
objectParser.declareLongArray(TestStruct::setLong_array_field, new ParseField("long_array_field"));
|
||||
objectParser.declareDouble(TestStruct::setDouble_field, new ParseField("double_field"));
|
||||
objectParser.declareDoubleOrNull(TestStruct::setNullableDoubleField, Double.NaN, new ParseField("nullable_double_field"));
|
||||
objectParser.declareDoubleArray(TestStruct::setDouble_array_field, new ParseField("double_array_field"));
|
||||
objectParser.declareFloat(TestStruct::setFloat_field, new ParseField("float_field"));
|
||||
objectParser.declareFloatArray(TestStruct::setFloat_array_field, new ParseField("float_array_field"));
|
||||
|
@ -431,6 +477,7 @@ public class ObjectParserTests extends ESTestCase {
|
|||
TestStruct parse = objectParser.parse(parser, new TestStruct(), null);
|
||||
assertArrayEquals(parse.double_array_field.toArray(), Collections.singletonList(2.1d).toArray());
|
||||
assertEquals(parse.double_field, 2.1d, 0.0d);
|
||||
assertThat(parse.nullableDoubleField, equalTo(expectedNullableDouble));
|
||||
|
||||
assertArrayEquals(parse.long_array_field.toArray(), Collections.singletonList(4L).toArray());
|
||||
assertEquals(parse.long_field, 4L);
|
||||
|
@ -440,6 +487,7 @@ public class ObjectParserTests extends ESTestCase {
|
|||
|
||||
assertArrayEquals(parse.int_array_field.toArray(), Collections.singletonList(1).toArray());
|
||||
assertEquals(parse.int_field, 1);
|
||||
assertThat(parse.nullableIntField, equalTo(expectedNullableInt));
|
||||
|
||||
assertArrayEquals(parse.float_array_field.toArray(), Collections.singletonList(3.1f).toArray());
|
||||
assertEquals(parse.float_field, 3.1f, 0.0f);
|
||||
|
|
|
@ -81,6 +81,8 @@
|
|||
---
|
||||
"Test user agent processor with non-ECS schema":
|
||||
- skip:
|
||||
version : "all"
|
||||
reason : "tracked at https://github.com/elastic/elasticsearch/issues/52266"
|
||||
features: warnings
|
||||
|
||||
- do:
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
|
||||
setup:
|
||||
- skip:
|
||||
version: " - 7.09.99"
|
||||
reason: "random score function of script score was added in 7.1"
|
||||
version: " - 7.1.99"
|
||||
reason: "random score function of script score was added in 7.2"
|
||||
|
||||
---
|
||||
"Random score function with _seq_no field":
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.elasticsearch.common.Explicit;
|
|||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
|
@ -59,6 +60,8 @@ import org.elasticsearch.index.query.QueryShardContext;
|
|||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.MultiValueMode;
|
||||
import org.elasticsearch.search.sort.BucketedSort;
|
||||
import org.elasticsearch.search.sort.SortOrder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.math.BigDecimal;
|
||||
|
@ -522,6 +525,12 @@ public class ScaledFloatFieldMapper extends FieldMapper {
|
|||
return new SortField(getFieldName(), source, reverse);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
|
||||
SortOrder sortOrder, DocValueFormat format) {
|
||||
return new DoubleValuesComparatorSource(this, missingValue, sortMode, nested).newBucketedSort(bigArrays, sortOrder, format);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
scaledFieldData.clear();
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.apache.lucene.search.PrefixQuery;
|
|||
import org.apache.lucene.search.TermInSetQuery;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.Defaults;
|
||||
import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.PrefixFieldType;
|
||||
import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeFieldType;
|
||||
|
@ -100,14 +101,19 @@ public class SearchAsYouTypeFieldTypeTests extends FieldTypeTestCase {
|
|||
|
||||
// this term should be a length that can be rewriteable to a term query on the prefix field
|
||||
final String withinBoundsTerm = "foo";
|
||||
assertThat(fieldType.prefixQuery(withinBoundsTerm, CONSTANT_SCORE_REWRITE, null),
|
||||
assertThat(fieldType.prefixQuery(withinBoundsTerm, CONSTANT_SCORE_REWRITE, randomMockShardContext()),
|
||||
equalTo(new ConstantScoreQuery(new TermQuery(new Term(PREFIX_NAME, withinBoundsTerm)))));
|
||||
|
||||
// our defaults don't allow a situation where a term can be too small
|
||||
|
||||
// this term should be too long to be rewriteable to a term query on the prefix field
|
||||
final String longTerm = "toolongforourprefixfieldthistermis";
|
||||
assertThat(fieldType.prefixQuery(longTerm, CONSTANT_SCORE_REWRITE, null),
|
||||
assertThat(fieldType.prefixQuery(longTerm, CONSTANT_SCORE_REWRITE, MOCK_QSC),
|
||||
equalTo(new PrefixQuery(new Term(NAME, longTerm))));
|
||||
|
||||
ElasticsearchException ee = expectThrows(ElasticsearchException.class,
|
||||
() -> fieldType.prefixQuery(longTerm, CONSTANT_SCORE_REWRITE, MOCK_QSC_DISALLOW_EXPENSIVE));
|
||||
assertEquals("[prefix] queries cannot be executed when 'search.allow_expensive_queries' is set to false. " +
|
||||
"For optimised prefix queries on text fields please enable [index_prefixes].", ee.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.apache.lucene.search.Query;
|
|||
import org.apache.lucene.search.join.JoinUtil;
|
||||
import org.apache.lucene.search.join.ScoreMode;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -55,6 +56,8 @@ import java.util.HashMap;
|
|||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES;
|
||||
|
||||
/**
|
||||
* A query builder for {@code has_child} query.
|
||||
*/
|
||||
|
@ -302,6 +305,11 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
|
|||
|
||||
@Override
|
||||
protected Query doToQuery(QueryShardContext context) throws IOException {
|
||||
if (context.allowExpensiveQueries() == false) {
|
||||
throw new ElasticsearchException("[joining] queries cannot be executed when '" +
|
||||
ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false.");
|
||||
}
|
||||
|
||||
ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.getMapperService());
|
||||
if (joinFieldMapper == null) {
|
||||
if (ignoreUnmapped) {
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.join.query;
|
|||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.join.ScoreMode;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -45,6 +46,8 @@ import java.util.HashMap;
|
|||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES;
|
||||
|
||||
/**
|
||||
* Builder for the 'has_parent' query.
|
||||
*/
|
||||
|
@ -158,6 +161,11 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
|
|||
|
||||
@Override
|
||||
protected Query doToQuery(QueryShardContext context) throws IOException {
|
||||
if (context.allowExpensiveQueries() == false) {
|
||||
throw new ElasticsearchException("[joining] queries cannot be executed when '" +
|
||||
ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false.");
|
||||
}
|
||||
|
||||
ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.getMapperService());
|
||||
if (joinFieldMapper == null) {
|
||||
if (ignoreUnmapped) {
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause;
|
|||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -38,6 +39,8 @@ import org.elasticsearch.join.mapper.ParentJoinFieldMapper;
|
|||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES;
|
||||
|
||||
public final class ParentIdQueryBuilder extends AbstractQueryBuilder<ParentIdQueryBuilder> {
|
||||
public static final String NAME = "parent_id";
|
||||
|
||||
|
@ -153,6 +156,11 @@ public final class ParentIdQueryBuilder extends AbstractQueryBuilder<ParentIdQue
|
|||
|
||||
@Override
|
||||
protected Query doToQuery(QueryShardContext context) throws IOException {
|
||||
if (context.allowExpensiveQueries() == false) {
|
||||
throw new ElasticsearchException("[joining] queries cannot be executed when '" +
|
||||
ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false.");
|
||||
}
|
||||
|
||||
ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.getMapperService());
|
||||
if (joinFieldMapper == null) {
|
||||
if (ignoreUnmapped) {
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.apache.lucene.search.TermQuery;
|
|||
import org.apache.lucene.search.join.ScoreMode;
|
||||
import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -69,6 +70,8 @@ import static org.hamcrest.CoreMatchers.containsString;
|
|||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.hamcrest.CoreMatchers.notNullValue;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQueryBuilder> {
|
||||
|
||||
|
@ -371,5 +374,18 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
|
|||
queryBuilder.innerHit(new InnerHitBuilder("some_name"));
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> InnerHitContextBuilder.extractInnerHits(queryBuilder, Collections.singletonMap("some_name", null)));
|
||||
assertEquals("[inner_hits] already contains an entry for key [some_name]", e.getMessage());
|
||||
}
|
||||
|
||||
public void testDisallowExpensiveQueries() {
|
||||
QueryShardContext queryShardContext = mock(QueryShardContext.class);
|
||||
when(queryShardContext.allowExpensiveQueries()).thenReturn(false);
|
||||
|
||||
HasChildQueryBuilder queryBuilder =
|
||||
hasChildQuery(CHILD_DOC, new TermQueryBuilder("custom_string", "value"), ScoreMode.None);
|
||||
ElasticsearchException e = expectThrows(ElasticsearchException.class,
|
||||
() -> queryBuilder.toQuery(queryShardContext));
|
||||
assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.",
|
||||
e.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.join.query;
|
|||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.join.ScoreMode;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -57,6 +58,8 @@ import static org.hamcrest.CoreMatchers.containsString;
|
|||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.hamcrest.CoreMatchers.notNullValue;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQueryBuilder> {
|
||||
private static final String TYPE = "_doc";
|
||||
|
@ -265,5 +268,18 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
|
|||
queryBuilder.innerHit(new InnerHitBuilder("some_name"));
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> InnerHitContextBuilder.extractInnerHits(queryBuilder, Collections.singletonMap("some_name", null)));
|
||||
assertEquals("[inner_hits] already contains an entry for key [some_name]", e.getMessage());
|
||||
}
|
||||
|
||||
public void testDisallowExpensiveQueries() {
|
||||
QueryShardContext queryShardContext = mock(QueryShardContext.class);
|
||||
when(queryShardContext.allowExpensiveQueries()).thenReturn(false);
|
||||
|
||||
HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder(
|
||||
CHILD_DOC, new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), false);
|
||||
ElasticsearchException e = expectThrows(ElasticsearchException.class,
|
||||
() -> queryBuilder.toQuery(queryShardContext));
|
||||
assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.",
|
||||
e.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.apache.lucene.search.BooleanQuery;
|
|||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -48,6 +49,8 @@ import static org.hamcrest.CoreMatchers.containsString;
|
|||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.hamcrest.CoreMatchers.notNullValue;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class ParentIdQueryBuilderTests extends AbstractQueryTestCase<ParentIdQueryBuilder> {
|
||||
|
||||
|
@ -154,4 +157,14 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase<ParentIdQue
|
|||
assertThat(e.getMessage(), containsString("[" + ParentIdQueryBuilder.NAME + "] no relation found for child [unmapped]"));
|
||||
}
|
||||
|
||||
public void testDisallowExpensiveQueries() {
|
||||
QueryShardContext queryShardContext = mock(QueryShardContext.class);
|
||||
when(queryShardContext.allowExpensiveQueries()).thenReturn(false);
|
||||
|
||||
ParentIdQueryBuilder queryBuilder = doCreateTestQueryBuilder();
|
||||
ElasticsearchException e = expectThrows(ElasticsearchException.class,
|
||||
() -> queryBuilder.toQuery(queryShardContext));
|
||||
assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.",
|
||||
e.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,6 +26,18 @@ setup:
|
|||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
---
|
||||
teardown:
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: null
|
||||
|
||||
---
|
||||
"Parent/child inner hits":
|
||||
- do:
|
||||
|
@ -53,3 +65,24 @@ setup:
|
|||
- is_false: hits.hits.0.inner_hits.child.hits.hits.0._nested
|
||||
- gte: { hits.hits.0.inner_hits.child.hits.hits.0._seq_no: 0 }
|
||||
- gte: { hits.hits.0.inner_hits.child.hits.hits.0._primary_term: 1 }
|
||||
|
||||
---
|
||||
"HasChild disallow expensive queries":
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
### Update setting to false
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: "false"
|
||||
flat_settings: true
|
||||
|
||||
- match: {transient: {search.allow_expensive_queries: "false"}}
|
||||
|
||||
- do:
|
||||
catch: /\[joining\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
body: { "query": { "has_child": { "type": "child", "query": { "match_all": {} }, "inner_hits": {} } } }
|
||||
|
|
|
@ -51,6 +51,18 @@ setup:
|
|||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
---
|
||||
teardown:
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: null
|
||||
|
||||
---
|
||||
"Test basic":
|
||||
- do:
|
||||
|
@ -116,3 +128,29 @@ setup:
|
|||
- match: { hits.hits.1._id: "4" }
|
||||
- match: { hits.hits.1._source.join_field.name: "child" }
|
||||
- match: { hits.hits.1._source.join_field.parent: "1" }
|
||||
|
||||
---
|
||||
"HasChild disallow expensive queries":
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
### Update setting to false
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: "false"
|
||||
flat_settings: true
|
||||
|
||||
- match: {transient: {search.allow_expensive_queries: "false"}}
|
||||
|
||||
- do:
|
||||
catch: /\[joining\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
body:
|
||||
sort: [ "id" ]
|
||||
query:
|
||||
parent_id:
|
||||
type: child
|
||||
id: 1
|
||||
|
|
|
@ -1,3 +1,61 @@
|
|||
---
|
||||
setup:
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: test
|
||||
body:
|
||||
mappings:
|
||||
properties:
|
||||
entity_type: { "type": "keyword" }
|
||||
join_field: { "type": "join", "relations": { "question": "answer", "person": "address" } }
|
||||
settings:
|
||||
number_of_shards: 1
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: test
|
||||
id: 1
|
||||
body: { "join_field": { "name": "question" }, "entity_type": "question" }
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: test
|
||||
id: 2
|
||||
routing: 1
|
||||
body: { "join_field": { "name": "answer", "parent": 1} , "entity_type": "answer" }
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: test
|
||||
id: 3
|
||||
body: { "join_field": { "name": "person" }, "entity_type": "person" }
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: test
|
||||
routing: 3
|
||||
id: 4
|
||||
body: { "join_field": { "name": "address", "parent": 3 }, "entity_type": "address" }
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
---
|
||||
teardown:
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: null
|
||||
|
||||
---
|
||||
"Test two sub-queries with only one having inner_hits":
|
||||
- skip:
|
||||
|
@ -66,3 +124,35 @@
|
|||
- match: { hits.hits.1._id: "2" }
|
||||
- match: { hits.hits.1.inner_hits.question.hits.total.value: 1}
|
||||
- match: { hits.hits.1.inner_hits.question.hits.hits.0._id: "1"}
|
||||
|
||||
---
|
||||
"HasParent disallow expensive queries":
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
### Update setting to false
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: "false"
|
||||
flat_settings: true
|
||||
|
||||
- match: {transient: {search.allow_expensive_queries: "false"}}
|
||||
|
||||
- do:
|
||||
catch: /\[joining\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
bool:
|
||||
should:
|
||||
- term:
|
||||
entity_type: person
|
||||
- has_parent:
|
||||
parent_type: question
|
||||
query:
|
||||
match_all: {}
|
||||
inner_hits: {}
|
||||
|
|
|
@ -91,6 +91,7 @@ import java.util.Objects;
|
|||
import java.util.function.Supplier;
|
||||
|
||||
import static org.elasticsearch.percolator.PercolatorFieldMapper.parseQuery;
|
||||
import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES;
|
||||
|
||||
public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBuilder> {
|
||||
public static final String NAME = "percolate";
|
||||
|
@ -569,6 +570,11 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
|
|||
|
||||
@Override
|
||||
protected Query doToQuery(QueryShardContext context) throws IOException {
|
||||
if (context.allowExpensiveQueries() == false) {
|
||||
throw new ElasticsearchException("[percolate] queries cannot be executed when '" +
|
||||
ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false.");
|
||||
}
|
||||
|
||||
// Call nowInMillis() so that this query becomes un-cacheable since we
|
||||
// can't be sure that it doesn't use now or scripts
|
||||
context.nowInMillis();
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.percolator;
|
||||
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ResourceNotFoundException;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.get.GetRequest;
|
||||
|
@ -57,6 +58,8 @@ import java.util.function.Supplier;
|
|||
|
||||
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQueryBuilder> {
|
||||
|
||||
|
@ -364,4 +367,14 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
|
|||
assertNotEquals(rewrittenQueryBuilder, percolateQueryBuilder);
|
||||
}
|
||||
|
||||
public void testDisallowExpensiveQueries() {
|
||||
QueryShardContext queryShardContext = mock(QueryShardContext.class);
|
||||
when(queryShardContext.allowExpensiveQueries()).thenReturn(false);
|
||||
|
||||
PercolateQueryBuilder queryBuilder = doCreateTestQueryBuilder(true);
|
||||
ElasticsearchException e = expectThrows(ElasticsearchException.class,
|
||||
() -> queryBuilder.toQuery(queryShardContext));
|
||||
assertEquals("[percolate] queries cannot be executed when 'search.allow_expensive_queries' is set to false.",
|
||||
e.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,11 +19,14 @@
|
|||
package org.elasticsearch.percolator;
|
||||
|
||||
import org.apache.lucene.search.join.ScoreMode;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.search.MultiSearchResponse;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.DistanceUnit;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
|
@ -37,6 +40,7 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
|
|||
import org.elasticsearch.search.sort.SortOrder;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
|
||||
|
@ -886,4 +890,55 @@ public class PercolatorQuerySearchIT extends ESIntegTestCase {
|
|||
assertThat(item.getFailureMessage(), containsString("[test/type/6] couldn't be found"));
|
||||
}
|
||||
|
||||
public void testDisallowExpensiveQueries() throws IOException {
|
||||
try {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.addMapping("_doc", "id", "type=keyword", "field1", "type=keyword", "query", "type=percolator")
|
||||
);
|
||||
|
||||
client().prepareIndex("test", "_doc").setId("1")
|
||||
.setSource(jsonBuilder().startObject()
|
||||
.field("id", "1")
|
||||
.field("query", matchQuery("field1", "value")).endObject())
|
||||
.get();
|
||||
refresh();
|
||||
|
||||
// Execute with search.allow_expensive_queries = null => default value = true => success
|
||||
BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject());
|
||||
SearchResponse response = client().prepareSearch()
|
||||
.setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON))
|
||||
.get();
|
||||
assertHitCount(response, 1);
|
||||
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
|
||||
assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0));
|
||||
|
||||
// Set search.allow_expensive_queries to "false" => assert failure
|
||||
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
|
||||
updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", false));
|
||||
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
|
||||
|
||||
ElasticsearchException e = expectThrows(ElasticsearchException.class,
|
||||
() -> client().prepareSearch()
|
||||
.setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON))
|
||||
.get());
|
||||
assertEquals("[percolate] queries cannot be executed when 'search.allow_expensive_queries' is set to false.",
|
||||
e.getCause().getMessage());
|
||||
|
||||
// Set search.allow_expensive_queries setting to "true" ==> success
|
||||
updateSettingsRequest = new ClusterUpdateSettingsRequest();
|
||||
updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", true));
|
||||
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
|
||||
|
||||
response = client().prepareSearch()
|
||||
.setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON))
|
||||
.get();
|
||||
assertHitCount(response, 1);
|
||||
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
|
||||
assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0));
|
||||
} finally {
|
||||
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
|
||||
updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", (String) null));
|
||||
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -158,7 +158,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
|
||||
@Override
|
||||
public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions,
|
||||
boolean transpositions) {
|
||||
boolean transpositions, QueryShardContext context) {
|
||||
throw new UnsupportedOperationException("[fuzzy] queries are not supported on [" + CONTENT_TYPE + "] fields.");
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.lucene.search.TermInSetQuery;
|
|||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TermRangeQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.unit.Fuzziness;
|
||||
import org.elasticsearch.index.mapper.ICUCollationKeywordFieldMapper.CollationFieldType;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType.Relation;
|
||||
|
@ -101,32 +102,36 @@ public class CollationFieldTypeTests extends FieldTypeTestCase {
|
|||
MappedFieldType ft = createDefaultFieldType();
|
||||
ft.setName("field");
|
||||
ft.setIndexOptions(IndexOptions.DOCS);
|
||||
expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.regexpQuery("foo.*", 0, 10, null, null));
|
||||
UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.regexpQuery("foo.*", 0, 10, null, randomMockShardContext()));
|
||||
assertEquals("[regexp] queries are not supported on [icu_collation_keyword] fields.", e.getMessage());
|
||||
}
|
||||
|
||||
public void testFuzzyQuery() {
|
||||
MappedFieldType ft = createDefaultFieldType();
|
||||
ft.setName("field");
|
||||
ft.setIndexOptions(IndexOptions.DOCS);
|
||||
expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true));
|
||||
UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, randomMockShardContext()));
|
||||
assertEquals("[fuzzy] queries are not supported on [icu_collation_keyword] fields.", e.getMessage());
|
||||
}
|
||||
|
||||
public void testPrefixQuery() {
|
||||
MappedFieldType ft = createDefaultFieldType();
|
||||
ft.setName("field");
|
||||
ft.setIndexOptions(IndexOptions.DOCS);
|
||||
expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.prefixQuery("prefix", null, null));
|
||||
UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.prefixQuery("prefix", null, randomMockShardContext()));
|
||||
assertEquals("[prefix] queries are not supported on [icu_collation_keyword] fields.", e.getMessage());
|
||||
}
|
||||
|
||||
public void testWildcardQuery() {
|
||||
MappedFieldType ft = createDefaultFieldType();
|
||||
ft.setName("field");
|
||||
ft.setIndexOptions(IndexOptions.DOCS);
|
||||
expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.wildcardQuery("foo*", null, null));
|
||||
UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class,
|
||||
() -> ft.wildcardQuery("foo*", null, randomMockShardContext()));
|
||||
assertEquals("[wildcard] queries are not supported on [icu_collation_keyword] fields.", e.getMessage());
|
||||
}
|
||||
|
||||
public void testRangeQuery() {
|
||||
|
@ -143,11 +148,16 @@ public class CollationFieldTypeTests extends FieldTypeTestCase {
|
|||
TermRangeQuery expected = new TermRangeQuery("field", new BytesRef(aKey.bytes, 0, aKey.size),
|
||||
new BytesRef(bKey.bytes, 0, bKey.size), false, false);
|
||||
|
||||
assertEquals(expected, ft.rangeQuery("a", "b", false, false, null, null, null, null));
|
||||
assertEquals(expected, ft.rangeQuery("a", "b", false, false, null, null, null, MOCK_QSC));
|
||||
|
||||
ElasticsearchException ee = expectThrows(ElasticsearchException.class,
|
||||
() -> ft.rangeQuery("a", "b", true, true, null, null, null, MOCK_QSC_DISALLOW_EXPENSIVE));
|
||||
assertEquals("[range] queries on [text] or [keyword] fields cannot be executed when " +
|
||||
"'search.allow_expensive_queries' is set to false.", ee.getMessage());
|
||||
|
||||
ft.setIndexOptions(IndexOptions.NONE);
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> ft.rangeQuery("a", "b", false, false, null, null, null, null));
|
||||
() -> ft.rangeQuery("a", "b", false, false, null, null, null, MOCK_QSC));
|
||||
assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -55,8 +55,7 @@ class AwsEc2ServiceImpl implements AwsEc2Service {
|
|||
|
||||
// proxy for testing
|
||||
AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
|
||||
final AmazonEC2 client = new AmazonEC2Client(credentials, configuration);
|
||||
return client;
|
||||
return new AmazonEC2Client(credentials, configuration);
|
||||
}
|
||||
|
||||
// pkg private for tests
|
||||
|
|
|
@ -0,0 +1,214 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.discovery.ec2;
|
||||
|
||||
import com.amazonaws.services.ec2.model.Instance;
|
||||
import com.amazonaws.services.ec2.model.Tag;
|
||||
import com.sun.net.httpserver.HttpServer;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.network.InetAddresses;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.MockSecureSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.mocksocket.MockHttpServer;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.transport.MockTransportService;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import javax.xml.XMLConstants;
|
||||
import javax.xml.stream.XMLOutputFactory;
|
||||
import javax.xml.stream.XMLStreamWriter;
|
||||
|
||||
import java.io.StringWriter;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
|
||||
@SuppressForbidden(reason = "use a http server")
|
||||
public abstract class AbstractEC2MockAPITestCase extends ESTestCase {
|
||||
|
||||
protected HttpServer httpServer;
|
||||
|
||||
protected ThreadPool threadPool;
|
||||
|
||||
protected MockTransportService transportService;
|
||||
|
||||
protected NetworkService networkService = new NetworkService(Collections.emptyList());
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
|
||||
httpServer.start();
|
||||
threadPool = new TestThreadPool(EC2RetriesTests.class.getName());
|
||||
transportService = createTransportService();
|
||||
super.setUp();
|
||||
}
|
||||
|
||||
protected abstract MockTransportService createTransportService();
|
||||
|
||||
protected Settings buildSettings(String accessKey) {
|
||||
final InetSocketAddress address = httpServer.getAddress();
|
||||
final String endpoint = "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort();
|
||||
final MockSecureSettings mockSecure = new MockSecureSettings();
|
||||
mockSecure.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), accessKey);
|
||||
mockSecure.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret");
|
||||
return Settings.builder().put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), endpoint).setSecureSettings(mockSecure).build();
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
try {
|
||||
IOUtils.close(transportService, () -> terminate(threadPool), () -> httpServer.stop(0));
|
||||
} finally {
|
||||
super.tearDown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a XML response that describe the EC2 instances
|
||||
* TODO: org.elasticsearch.discovery.ec2.AmazonEC2Fixture uses pretty much the same code. We should dry up that test fixture.
|
||||
*/
|
||||
static byte[] generateDescribeInstancesResponse(List<Instance> instances) {
|
||||
final XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory();
|
||||
xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
|
||||
|
||||
final StringWriter out = new StringWriter();
|
||||
XMLStreamWriter sw;
|
||||
try {
|
||||
sw = xmlOutputFactory.createXMLStreamWriter(out);
|
||||
sw.writeStartDocument();
|
||||
|
||||
String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/";
|
||||
sw.setDefaultNamespace(namespace);
|
||||
sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace);
|
||||
{
|
||||
sw.writeStartElement("requestId");
|
||||
sw.writeCharacters(UUID.randomUUID().toString());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("reservationSet");
|
||||
{
|
||||
for (Instance instance : instances) {
|
||||
sw.writeStartElement("item");
|
||||
{
|
||||
sw.writeStartElement("reservationId");
|
||||
sw.writeCharacters(UUID.randomUUID().toString());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("instancesSet");
|
||||
{
|
||||
sw.writeStartElement("item");
|
||||
{
|
||||
sw.writeStartElement("instanceId");
|
||||
sw.writeCharacters(instance.getInstanceId());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("imageId");
|
||||
sw.writeCharacters(instance.getImageId());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("instanceState");
|
||||
{
|
||||
sw.writeStartElement("code");
|
||||
sw.writeCharacters("16");
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("name");
|
||||
sw.writeCharacters("running");
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("privateDnsName");
|
||||
sw.writeCharacters(instance.getPrivateDnsName());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("dnsName");
|
||||
sw.writeCharacters(instance.getPublicDnsName());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("instanceType");
|
||||
sw.writeCharacters("m1.medium");
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("placement");
|
||||
{
|
||||
sw.writeStartElement("availabilityZone");
|
||||
sw.writeCharacters("use-east-1e");
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeEmptyElement("groupName");
|
||||
|
||||
sw.writeStartElement("tenancy");
|
||||
sw.writeCharacters("default");
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("privateIpAddress");
|
||||
sw.writeCharacters(instance.getPrivateIpAddress());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("ipAddress");
|
||||
sw.writeCharacters(instance.getPublicIpAddress());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("tagSet");
|
||||
for (Tag tag : instance.getTags()) {
|
||||
sw.writeStartElement("item");
|
||||
{
|
||||
sw.writeStartElement("key");
|
||||
sw.writeCharacters(tag.getKey());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("value");
|
||||
sw.writeCharacters(tag.getValue());
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeEndDocument();
|
||||
sw.flush();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
return out.toString().getBytes(UTF_8);
|
||||
}
|
||||
}
|
|
@ -1,179 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.discovery.ec2;
|
||||
|
||||
import com.amazonaws.AmazonClientException;
|
||||
import com.amazonaws.ClientConfiguration;
|
||||
import com.amazonaws.auth.AWSCredentialsProvider;
|
||||
import com.amazonaws.services.ec2.AbstractAmazonEC2;
|
||||
import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
|
||||
import com.amazonaws.services.ec2.model.DescribeInstancesResult;
|
||||
import com.amazonaws.services.ec2.model.Filter;
|
||||
import com.amazonaws.services.ec2.model.Instance;
|
||||
import com.amazonaws.services.ec2.model.InstanceState;
|
||||
import com.amazonaws.services.ec2.model.InstanceStateName;
|
||||
import com.amazonaws.services.ec2.model.Reservation;
|
||||
import com.amazonaws.services.ec2.model.Tag;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
public class AmazonEC2Mock extends AbstractAmazonEC2 {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(AmazonEC2Mock.class);
|
||||
|
||||
public static final String PREFIX_PRIVATE_IP = "10.0.0.";
|
||||
public static final String PREFIX_PUBLIC_IP = "8.8.8.";
|
||||
public static final String PREFIX_PUBLIC_DNS = "mock-ec2-";
|
||||
public static final String SUFFIX_PUBLIC_DNS = ".amazon.com";
|
||||
public static final String PREFIX_PRIVATE_DNS = "mock-ip-";
|
||||
public static final String SUFFIX_PRIVATE_DNS = ".ec2.internal";
|
||||
|
||||
final List<Instance> instances = new ArrayList<>();
|
||||
String endpoint;
|
||||
final AWSCredentialsProvider credentials;
|
||||
final ClientConfiguration configuration;
|
||||
|
||||
public AmazonEC2Mock(int nodes, List<List<Tag>> tagsList, AWSCredentialsProvider credentials, ClientConfiguration configuration) {
|
||||
if (tagsList != null) {
|
||||
assert tagsList.size() == nodes;
|
||||
}
|
||||
|
||||
for (int node = 1; node < nodes + 1; node++) {
|
||||
String instanceId = "node" + node;
|
||||
|
||||
Instance instance = new Instance()
|
||||
.withInstanceId(instanceId)
|
||||
.withState(new InstanceState().withName(InstanceStateName.Running))
|
||||
.withPrivateDnsName(PREFIX_PRIVATE_DNS + instanceId + SUFFIX_PRIVATE_DNS)
|
||||
.withPublicDnsName(PREFIX_PUBLIC_DNS + instanceId + SUFFIX_PUBLIC_DNS)
|
||||
.withPrivateIpAddress(PREFIX_PRIVATE_IP + node)
|
||||
.withPublicIpAddress(PREFIX_PUBLIC_IP + node);
|
||||
|
||||
if (tagsList != null) {
|
||||
instance.setTags(tagsList.get(node-1));
|
||||
}
|
||||
|
||||
instances.add(instance);
|
||||
}
|
||||
this.credentials = credentials;
|
||||
this.configuration = configuration;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DescribeInstancesResult describeInstances(DescribeInstancesRequest describeInstancesRequest)
|
||||
throws AmazonClientException {
|
||||
Collection<Instance> filteredInstances = new ArrayList<>();
|
||||
|
||||
logger.debug("--> mocking describeInstances");
|
||||
|
||||
for (Instance instance : instances) {
|
||||
boolean tagFiltered = false;
|
||||
boolean instanceFound = false;
|
||||
|
||||
Map<String, List<String>> expectedTags = new HashMap<>();
|
||||
Map<String, List<String>> instanceTags = new HashMap<>();
|
||||
|
||||
for (Tag tag : instance.getTags()) {
|
||||
List<String> tags = instanceTags.get(tag.getKey());
|
||||
if (tags == null) {
|
||||
tags = new ArrayList<>();
|
||||
instanceTags.put(tag.getKey(), tags);
|
||||
}
|
||||
tags.add(tag.getValue());
|
||||
}
|
||||
|
||||
for (Filter filter : describeInstancesRequest.getFilters()) {
|
||||
// If we have the same tag name and one of the values, we add the instance
|
||||
if (filter.getName().startsWith("tag:")) {
|
||||
tagFiltered = true;
|
||||
String tagName = filter.getName().substring(4);
|
||||
// if we have more than one value for the same key, then the key is appended with .x
|
||||
Pattern p = Pattern.compile("\\.\\d+", Pattern.DOTALL);
|
||||
Matcher m = p.matcher(tagName);
|
||||
if (m.find()) {
|
||||
int i = tagName.lastIndexOf(".");
|
||||
tagName = tagName.substring(0, i);
|
||||
}
|
||||
|
||||
List<String> tags = expectedTags.get(tagName);
|
||||
if (tags == null) {
|
||||
tags = new ArrayList<>();
|
||||
expectedTags.put(tagName, tags);
|
||||
}
|
||||
tags.addAll(filter.getValues());
|
||||
}
|
||||
}
|
||||
|
||||
if (tagFiltered) {
|
||||
logger.debug("--> expected tags: [{}]", expectedTags);
|
||||
logger.debug("--> instance tags: [{}]", instanceTags);
|
||||
|
||||
instanceFound = true;
|
||||
for (Map.Entry<String, List<String>> expectedTagsEntry : expectedTags.entrySet()) {
|
||||
List<String> instanceTagValues = instanceTags.get(expectedTagsEntry.getKey());
|
||||
if (instanceTagValues == null) {
|
||||
instanceFound = false;
|
||||
break;
|
||||
}
|
||||
|
||||
for (String expectedValue : expectedTagsEntry.getValue()) {
|
||||
boolean valueFound = false;
|
||||
for (String instanceTagValue : instanceTagValues) {
|
||||
if (instanceTagValue.equals(expectedValue)) {
|
||||
valueFound = true;
|
||||
}
|
||||
}
|
||||
if (valueFound == false) {
|
||||
instanceFound = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (tagFiltered == false || instanceFound) {
|
||||
logger.debug("--> instance added");
|
||||
filteredInstances.add(instance);
|
||||
} else {
|
||||
logger.debug("--> instance filtered");
|
||||
}
|
||||
}
|
||||
|
||||
return new DescribeInstancesResult().withReservations(
|
||||
new Reservation().withInstances(filteredInstances)
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setEndpoint(String endpoint) throws IllegalArgumentException {
|
||||
this.endpoint = endpoint;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
}
|
||||
}
|
|
@ -1,44 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.discovery.ec2;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.model.Tag;

import java.util.List;

public class AwsEc2ServiceMock extends AwsEc2ServiceImpl {

private final int nodes;
private final List<List<Tag>> tagsList;

public AwsEc2ServiceMock(int nodes, List<List<Tag>> tagsList) {
this.nodes = nodes;
this.tagsList = tagsList;
}

@Override
AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
return new AmazonEC2Mock(nodes, tagsList, credentials, configuration);
}

}
@ -20,7 +20,7 @@
package org.elasticsearch.discovery.ec2;

import com.amazonaws.http.HttpMethodName;
import com.sun.net.httpserver.HttpServer;
import com.amazonaws.services.ec2.model.Instance;
import org.apache.http.HttpStatus;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
@ -28,82 +28,41 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.network.InetAddresses;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.MockSecureSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.discovery.SeedHostsProvider;
|
||||
import org.elasticsearch.discovery.SeedHostsResolver;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.mocksocket.MockHttpServer;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.transport.MockTransportService;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.transport.nio.MockNioTransport;
|
||||
import org.hamcrest.Matchers;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import javax.xml.XMLConstants;
|
||||
import javax.xml.stream.XMLOutputFactory;
|
||||
import javax.xml.stream.XMLStreamWriter;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static org.hamcrest.Matchers.aMapWithSize;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
@SuppressForbidden(reason = "use a http server")
|
||||
public class EC2RetriesTests extends ESTestCase {
|
||||
public class EC2RetriesTests extends AbstractEC2MockAPITestCase {
|
||||
|
||||
private HttpServer httpServer;
|
||||
|
||||
private ThreadPool threadPool;
|
||||
|
||||
private MockTransportService transportService;
|
||||
|
||||
private NetworkService networkService = new NetworkService(Collections.emptyList());
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
|
||||
httpServer.start();
|
||||
threadPool = new TestThreadPool(EC2RetriesTests.class.getName());
|
||||
final MockNioTransport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool, networkService,
|
||||
@Override
|
||||
protected MockTransportService createTransportService() {
|
||||
return new MockTransportService(Settings.EMPTY, new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool, networkService,
|
||||
PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()),
|
||||
new NoneCircuitBreakerService());
|
||||
transportService =
|
||||
new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
super.setUp();
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
try {
|
||||
IOUtils.close(transportService, () -> terminate(threadPool), () -> httpServer.stop(0));
|
||||
} finally {
|
||||
super.tearDown();
|
||||
}
|
||||
new NoneCircuitBreakerService()), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
}
|
||||
|
||||
public void testEC2DiscoveryRetriesOnRateLimiting() throws IOException {
|
||||
assumeFalse("https://github.com/elastic/elasticsearch/issues/51685", inFipsJvm());
|
||||
final String accessKey = "ec2_access";
|
||||
final List<String> hosts = Collections.singletonList("127.0.0.1:9000");
|
||||
final List<String> hosts = Collections.singletonList("127.0.0.1:9300");
|
||||
final Map<String, Integer> failedRequests = new ConcurrentHashMap<>();
|
||||
// retry the same request 5 times at most
|
||||
final int maxRetries = randomIntBetween(1, 5);
|
||||
|
@ -125,7 +84,8 @@ public class EC2RetriesTests extends ESTestCase {
byte[] responseBody = null;
for (NameValuePair parse : URLEncodedUtils.parse(request, UTF_8)) {
if ("Action".equals(parse.getName())) {
responseBody = generateDescribeInstancesResponse(hosts);
responseBody = generateDescribeInstancesResponse(hosts.stream().map(
address -> new Instance().withPublicIpAddress(address)).collect(Collectors.toList()));
break;
}
}
@ -138,14 +98,7 @@ public class EC2RetriesTests extends ESTestCase {
}
fail("did not send response");
});

final InetSocketAddress address = httpServer.getAddress();
final String endpoint = "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort();
final MockSecureSettings mockSecure = new MockSecureSettings();
mockSecure.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), accessKey);
mockSecure.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret");
try (Ec2DiscoveryPlugin plugin = new Ec2DiscoveryPlugin(
Settings.builder().put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), endpoint).setSecureSettings(mockSecure).build())) {
try (Ec2DiscoveryPlugin plugin = new Ec2DiscoveryPlugin(buildSettings(accessKey))) {
final SeedHostsProvider seedHostsProvider = plugin.getSeedHostProviders(transportService, networkService).get("ec2").get();
final SeedHostsResolver resolver = new SeedHostsResolver("test", Settings.EMPTY, transportService, seedHostsProvider);
resolver.start();
@ -156,112 +109,4 @@ public class EC2RetriesTests extends ESTestCase {
|
|||
assertThat(failedRequests.values().iterator().next(), is(maxRetries));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a XML response that describe the EC2 instances
|
||||
* TODO: org.elasticsearch.discovery.ec2.AmazonEC2Fixture uses pretty much the same code. We should dry up that test fixture.
|
||||
*/
|
||||
private byte[] generateDescribeInstancesResponse(List<String> nodes) {
|
||||
final XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory();
|
||||
xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
|
||||
|
||||
final StringWriter out = new StringWriter();
|
||||
XMLStreamWriter sw;
|
||||
try {
|
||||
sw = xmlOutputFactory.createXMLStreamWriter(out);
|
||||
sw.writeStartDocument();
|
||||
|
||||
String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/";
|
||||
sw.setDefaultNamespace(namespace);
|
||||
sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace);
|
||||
{
|
||||
sw.writeStartElement("requestId");
|
||||
sw.writeCharacters(UUID.randomUUID().toString());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("reservationSet");
|
||||
{
|
||||
for (String address : nodes) {
|
||||
sw.writeStartElement("item");
|
||||
{
|
||||
sw.writeStartElement("reservationId");
|
||||
sw.writeCharacters(UUID.randomUUID().toString());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("instancesSet");
|
||||
{
|
||||
sw.writeStartElement("item");
|
||||
{
|
||||
sw.writeStartElement("instanceId");
|
||||
sw.writeCharacters(UUID.randomUUID().toString());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("imageId");
|
||||
sw.writeCharacters(UUID.randomUUID().toString());
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("instanceState");
|
||||
{
|
||||
sw.writeStartElement("code");
|
||||
sw.writeCharacters("16");
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("name");
|
||||
sw.writeCharacters("running");
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("privateDnsName");
|
||||
sw.writeCharacters(address);
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("dnsName");
|
||||
sw.writeCharacters(address);
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("instanceType");
|
||||
sw.writeCharacters("m1.medium");
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("placement");
|
||||
{
|
||||
sw.writeStartElement("availabilityZone");
|
||||
sw.writeCharacters("use-east-1e");
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeEmptyElement("groupName");
|
||||
|
||||
sw.writeStartElement("tenancy");
|
||||
sw.writeCharacters("default");
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("privateIpAddress");
|
||||
sw.writeCharacters(address);
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeStartElement("ipAddress");
|
||||
sw.writeCharacters(address);
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
}
|
||||
sw.writeEndElement();
|
||||
|
||||
sw.writeEndDocument();
|
||||
sw.flush();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
return out.toString().getBytes(UTF_8);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,38 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.discovery.ec2;

import com.amazonaws.services.ec2.model.Tag;

import org.elasticsearch.common.settings.Settings;

import java.util.List;

public class Ec2DiscoveryPluginMock extends Ec2DiscoveryPlugin {

Ec2DiscoveryPluginMock(Settings settings) {
this(settings, 1, null);
}

public Ec2DiscoveryPluginMock(Settings settings, int nodes, List<List<Tag>> tagsList) {
super(settings, new AwsEc2ServiceMock(nodes, tagsList));
}

}
@ -19,9 +19,13 @@

package org.elasticsearch.discovery.ec2;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.services.ec2.AbstractAmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
@ -189,4 +193,37 @@ public class Ec2DiscoveryPluginTests extends ESTestCase {
}
}
}

private static class Ec2DiscoveryPluginMock extends Ec2DiscoveryPlugin {

Ec2DiscoveryPluginMock(Settings settings) {
super(settings, new AwsEc2ServiceImpl() {
@Override
AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
return new AmazonEC2Mock(credentials, configuration);
}
});
}
}

private static class AmazonEC2Mock extends AbstractAmazonEC2 {

String endpoint;
final AWSCredentialsProvider credentials;
final ClientConfiguration configuration;

AmazonEC2Mock(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
this.credentials = credentials;
this.configuration = configuration;
}

@Override
public void setEndpoint(String endpoint) throws IllegalArgumentException {
this.endpoint = endpoint;
}

@Override
public void shutdown() {
}
}
}
@ -19,71 +19,68 @@
|
|||
|
||||
package org.elasticsearch.discovery.ec2;
|
||||
|
||||
import com.amazonaws.http.HttpMethodName;
|
||||
import com.amazonaws.services.ec2.model.Instance;
|
||||
import com.amazonaws.services.ec2.model.InstanceState;
|
||||
import com.amazonaws.services.ec2.model.InstanceStateName;
|
||||
import com.amazonaws.services.ec2.model.Tag;
|
||||
import org.apache.http.HttpStatus;
|
||||
import org.apache.http.NameValuePair;
|
||||
import org.apache.http.client.utils.URLEncodedUtils;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.transport.MockTransportService;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.transport.nio.MockNioTransport;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class Ec2DiscoveryTests extends ESTestCase {
|
||||
@SuppressForbidden(reason = "use a http server")
|
||||
public class Ec2DiscoveryTests extends AbstractEC2MockAPITestCase {
|
||||
|
||||
private static final String SUFFIX_PRIVATE_DNS = ".ec2.internal";
|
||||
private static final String PREFIX_PRIVATE_DNS = "mock-ip-";
|
||||
private static final String SUFFIX_PUBLIC_DNS = ".amazon.com";
|
||||
private static final String PREFIX_PUBLIC_DNS = "mock-ec2-";
|
||||
private static final String PREFIX_PUBLIC_IP = "8.8.8.";
|
||||
private static final String PREFIX_PRIVATE_IP = "10.0.0.";
|
||||
|
||||
protected static ThreadPool threadPool;
|
||||
protected MockTransportService transportService;
|
||||
private Map<String, TransportAddress> poorMansDNS = new ConcurrentHashMap<>();
|
||||
|
||||
@BeforeClass
|
||||
public static void createThreadPool() {
|
||||
threadPool = new TestThreadPool(Ec2DiscoveryTests.class.getName());
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void stopThreadPool() throws InterruptedException {
|
||||
if (threadPool !=null) {
|
||||
terminate(threadPool);
|
||||
threadPool = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Before
|
||||
public void createTransportService() {
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
|
||||
protected MockTransportService createTransportService() {
|
||||
final Transport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool,
|
||||
new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry,
|
||||
new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, writableRegistry(),
|
||||
new NoneCircuitBreakerService()) {
|
||||
@Override
|
||||
public TransportAddress[] addressesFromString(String address) throws UnknownHostException {
|
||||
public TransportAddress[] addressesFromString(String address) {
|
||||
// we just need to ensure we don't resolve DNS here
|
||||
return new TransportAddress[] {poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress())};
|
||||
}
|
||||
};
|
||||
transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
|
||||
null);
|
||||
return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
}
|
||||
|
||||
protected List<TransportAddress> buildDynamicHosts(Settings nodeSettings, int nodes) {
|
||||
|
@ -91,8 +88,65 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
}
|
||||
|
||||
protected List<TransportAddress> buildDynamicHosts(Settings nodeSettings, int nodes, List<List<Tag>> tagsList) {
|
||||
try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY, nodes, tagsList)) {
|
||||
final String accessKey = "ec2_key";
|
||||
try (Ec2DiscoveryPlugin plugin = new Ec2DiscoveryPlugin(buildSettings(accessKey))) {
|
||||
AwsEc2SeedHostsProvider provider = new AwsEc2SeedHostsProvider(nodeSettings, transportService, plugin.ec2Service);
|
||||
httpServer.createContext("/", exchange -> {
|
||||
if (exchange.getRequestMethod().equals(HttpMethodName.POST.name())) {
|
||||
final String request = Streams.readFully(exchange.getRequestBody()).toBytesRef().utf8ToString();
|
||||
final String userAgent = exchange.getRequestHeaders().getFirst("User-Agent");
|
||||
if (userAgent != null && userAgent.startsWith("aws-sdk-java")) {
|
||||
final String auth = exchange.getRequestHeaders().getFirst("Authorization");
|
||||
if (auth == null || auth.contains(accessKey) == false) {
|
||||
throw new IllegalArgumentException("wrong access key: " + auth);
|
||||
}
|
||||
// Simulate an EC2 DescribeInstancesResponse
|
||||
final Map<String, List<String>> tagsIncluded = new HashMap<>();
|
||||
final String[] params = request.split("&");
|
||||
Arrays.stream(params).filter(entry -> entry.startsWith("Filter.") && entry.contains("=tag%3A"))
|
||||
.forEach(entry -> {
|
||||
final int startIndex = "Filter.".length();
|
||||
final int filterId = Integer.parseInt(entry.substring(startIndex, entry.indexOf(".", startIndex)));
|
||||
tagsIncluded.put(entry.substring(entry.indexOf("=tag%3A") + "=tag%3A".length()),
|
||||
Arrays.stream(params)
|
||||
.filter(param -> param.startsWith("Filter." + filterId + ".Value."))
|
||||
.map(param -> param.substring(param.indexOf("=") + 1))
|
||||
.collect(Collectors.toList()));
|
||||
}
|
||||
);
|
||||
final List<Instance> instances = IntStream.range(1, nodes + 1).mapToObj(node -> {
|
||||
final String instanceId = "node" + node;
|
||||
final Instance instance = new Instance()
|
||||
.withInstanceId(instanceId)
|
||||
.withState(new InstanceState().withName(InstanceStateName.Running))
|
||||
.withPrivateDnsName(PREFIX_PRIVATE_DNS + instanceId + SUFFIX_PRIVATE_DNS)
|
||||
.withPublicDnsName(PREFIX_PUBLIC_DNS + instanceId + SUFFIX_PUBLIC_DNS)
|
||||
.withPrivateIpAddress(PREFIX_PRIVATE_IP + node)
|
||||
.withPublicIpAddress(PREFIX_PUBLIC_IP + node);
|
||||
if (tagsList != null) {
|
||||
instance.setTags(tagsList.get(node - 1));
|
||||
}
|
||||
return instance;
|
||||
}).filter(instance ->
|
||||
tagsIncluded.entrySet().stream().allMatch(entry -> instance.getTags().stream()
|
||||
.filter(t -> t.getKey().equals(entry.getKey()))
|
||||
.map(Tag::getValue)
|
||||
.collect(Collectors.toList())
|
||||
.containsAll(entry.getValue())))
|
||||
.collect(Collectors.toList());
|
||||
for (NameValuePair parse : URLEncodedUtils.parse(request, UTF_8)) {
|
||||
if ("Action".equals(parse.getName())) {
|
||||
final byte[] responseBody = generateDescribeInstancesResponse(instances);
|
||||
exchange.getResponseHeaders().set("Content-Type", "text/xml; charset=UTF-8");
|
||||
exchange.sendResponseHeaders(HttpStatus.SC_OK, responseBody.length);
|
||||
exchange.getResponseBody().write(responseBody);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fail("did not send response");
|
||||
});
|
||||
List<TransportAddress> dynamicHosts = provider.getSeedAddresses(null);
|
||||
logger.debug("--> addresses found: {}", dynamicHosts);
|
||||
return dynamicHosts;
|
||||
|
@ -113,7 +167,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
public void testPrivateIp() throws InterruptedException {
|
||||
int nodes = randomInt(10);
|
||||
for (int i = 0; i < nodes; i++) {
|
||||
poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress());
|
||||
poorMansDNS.put(PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress());
|
||||
}
|
||||
Settings nodeSettings = Settings.builder()
|
||||
.put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_ip")
|
||||
|
@ -123,7 +177,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
// We check that we are using here expected address
|
||||
int node = 1;
|
||||
for (TransportAddress address : transportAddresses) {
|
||||
TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PRIVATE_IP + node++);
|
||||
TransportAddress expected = poorMansDNS.get(PREFIX_PRIVATE_IP + node++);
|
||||
assertEquals(address, expected);
|
||||
}
|
||||
}
|
||||
|
@ -131,7 +185,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
public void testPublicIp() throws InterruptedException {
|
||||
int nodes = randomInt(10);
|
||||
for (int i = 0; i < nodes; i++) {
|
||||
poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress());
|
||||
poorMansDNS.put(PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress());
|
||||
}
|
||||
Settings nodeSettings = Settings.builder()
|
||||
.put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_ip")
|
||||
|
@ -141,7 +195,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
// We check that we are using here expected address
|
||||
int node = 1;
|
||||
for (TransportAddress address : dynamicHosts) {
|
||||
TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PUBLIC_IP + node++);
|
||||
TransportAddress expected = poorMansDNS.get(PREFIX_PUBLIC_IP + node++);
|
||||
assertEquals(address, expected);
|
||||
}
|
||||
}
|
||||
|
@ -150,8 +204,8 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
int nodes = randomInt(10);
|
||||
for (int i = 0; i < nodes; i++) {
|
||||
String instanceId = "node" + (i+1);
|
||||
poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId +
|
||||
AmazonEC2Mock.SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress());
|
||||
poorMansDNS.put(PREFIX_PRIVATE_DNS + instanceId +
|
||||
SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress());
|
||||
}
|
||||
Settings nodeSettings = Settings.builder()
|
||||
.put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_dns")
|
||||
|
@ -163,7 +217,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
for (TransportAddress address : dynamicHosts) {
|
||||
String instanceId = "node" + node++;
|
||||
TransportAddress expected = poorMansDNS.get(
|
||||
AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + AmazonEC2Mock.SUFFIX_PRIVATE_DNS);
|
||||
PREFIX_PRIVATE_DNS + instanceId + SUFFIX_PRIVATE_DNS);
|
||||
assertEquals(address, expected);
|
||||
}
|
||||
}
|
||||
|
@ -172,8 +226,8 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
int nodes = randomInt(10);
|
||||
for (int i = 0; i < nodes; i++) {
|
||||
String instanceId = "node" + (i+1);
|
||||
poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId
|
||||
+ AmazonEC2Mock.SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress());
|
||||
poorMansDNS.put(PREFIX_PUBLIC_DNS + instanceId
|
||||
+ SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress());
|
||||
}
|
||||
Settings nodeSettings = Settings.builder()
|
||||
.put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_dns")
|
||||
|
@ -185,7 +239,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
for (TransportAddress address : dynamicHosts) {
|
||||
String instanceId = "node" + node++;
|
||||
TransportAddress expected = poorMansDNS.get(
|
||||
AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId + AmazonEC2Mock.SUFFIX_PUBLIC_DNS);
|
||||
PREFIX_PUBLIC_DNS + instanceId + SUFFIX_PUBLIC_DNS);
|
||||
assertEquals(address, expected);
|
||||
}
|
||||
}
|
||||
|
@ -289,8 +343,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
abstract class DummyEc2SeedHostsProvider extends AwsEc2SeedHostsProvider {
|
||||
abstract static class DummyEc2SeedHostsProvider extends AwsEc2SeedHostsProvider {
|
||||
public int fetchCount = 0;
|
||||
DummyEc2SeedHostsProvider(Settings settings, TransportService transportService, AwsEc2Service service) {
|
||||
super(settings, transportService, service);
|
||||
|
@ -298,7 +351,7 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testGetNodeListEmptyCache() {
|
||||
AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(1, null);
|
||||
AwsEc2Service awsEc2Service = new AwsEc2ServiceImpl();
|
||||
DummyEc2SeedHostsProvider provider = new DummyEc2SeedHostsProvider(Settings.EMPTY, transportService, awsEc2Service) {
|
||||
@Override
|
||||
protected List<TransportAddress> fetchDynamicNodes() {
|
||||
|
@ -311,27 +364,4 @@ public class Ec2DiscoveryTests extends ESTestCase {
|
|||
}
|
||||
assertThat(provider.fetchCount, is(1));
|
||||
}
|
||||
|
||||
public void testGetNodeListCached() throws Exception {
|
||||
Settings.Builder builder = Settings.builder()
|
||||
.put(AwsEc2Service.NODE_CACHE_TIME_SETTING.getKey(), "500ms");
|
||||
try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) {
|
||||
DummyEc2SeedHostsProvider provider = new DummyEc2SeedHostsProvider(builder.build(), transportService, plugin.ec2Service) {
|
||||
@Override
|
||||
protected List<TransportAddress> fetchDynamicNodes() {
|
||||
fetchCount++;
|
||||
return Ec2DiscoveryTests.this.buildDynamicHosts(Settings.EMPTY, 1);
|
||||
}
|
||||
};
|
||||
for (int i=0; i<3; i++) {
|
||||
provider.getSeedAddresses(null);
|
||||
}
|
||||
assertThat(provider.fetchCount, is(1));
|
||||
Thread.sleep(1_000L); // wait for cache to expire
|
||||
for (int i=0; i<3; i++) {
|
||||
provider.getSeedAddresses(null);
|
||||
}
|
||||
assertThat(provider.fetchCount, is(2));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,6 +31,7 @@ import java.nio.file.Paths;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ -76,7 +77,12 @@ public class Docker {
* @param distribution details about the docker image to potentially load.
*/
public static void ensureImageIsLoaded(Distribution distribution) {
final long count = sh.run("docker image ls --format '{{.Repository}}' " + distribution.flavor.name).stdout.split("\n").length;
Shell.Result result = sh.run("docker image ls --format '{{.Repository}}' " + distribution.flavor.name);

final long count = Arrays.stream(result.stdout.split("\n"))
.map(String::trim)
.filter(s -> s.isEmpty() == false)
.count();

if (count != 0) {
return;
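The rewritten count sidesteps a String.split quirk: split never returns an empty array, so when the docker image listing prints nothing the old length-based expression still reported 1 and the image was wrongly treated as loaded. A self-contained sketch of the difference (plain Java, not tied to the Docker helper above):

import java.util.Arrays;

public class ImageCountSketch {
    public static void main(String[] args) {
        String emptyStdout = "";              // no images loaded
        String oneImage = "elasticsearch\n";  // one repository line

        // Old check: length of the raw split, which is 1 even for empty output.
        System.out.println(emptyStdout.split("\n").length); // 1

        // New check: trim and drop empty lines before counting.
        System.out.println(countLines(emptyStdout)); // 0
        System.out.println(countLines(oneImage));    // 1
    }

    static long countLines(String stdout) {
        return Arrays.stream(stdout.split("\n"))
            .map(String::trim)
            .filter(s -> s.isEmpty() == false)
            .count();
    }
}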
@ -84,7 +84,7 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler {
@Override
public List<DeprecatedRoute> deprecatedRoutes() {
return singletonList(
new DeprecatedRoute(GET, "/_test_cluster/deprecated_settings", DEPRECATED_ENDPOINT, deprecationLogger));
new DeprecatedRoute(GET, "/_test_cluster/deprecated_settings", DEPRECATED_ENDPOINT));
}

@Override
@ -85,8 +85,8 @@
---
"Close index response with result per index":
- skip:
version: " - 7.99.99"
reason: "close index response reports result per index starting version 8.0.0"
version: " - 7.2.99"
reason: "close index response reports result per index starting version 7.3.0"

- do:
indices.create:
@ -13,8 +13,8 @@ setup:
---
"Validate query api":
- skip:
version: ' - 7.99.99'
reason: message changed in 8.0.0
version: ' - 7.6.99'
reason: message changed in 7.7.0

- do:
indices.validate_query:
@ -6,11 +6,11 @@ setup:
settings:
number_of_replicas: 0
mappings:
"properties":
"number":
"type" : "integer"
"date":
"type" : "date"
properties:
number:
type: integer
date:
type: date
- do:
cluster.health:
wait_for_status: green

@ -214,7 +217,10 @@ setup:
mappings:
properties:
date:
type : date
type: date
fields:
nanos:
type: date_nanos

- do:
bulk:

@ -239,7 +242,24 @@ setup:
date_histogram:
field: date
calendar_interval: month
- match: { hits.total.value: 4 }
- length: { aggregations.histo.buckets: 3 }
- match: { aggregations.histo.buckets.0.key_as_string: "2016-01-01T00:00:00.000Z" }
- match: { aggregations.histo.buckets.0.doc_count: 2 }
- match: { aggregations.histo.buckets.1.key_as_string: "2016-02-01T00:00:00.000Z" }
- match: { aggregations.histo.buckets.1.doc_count: 1 }
- match: { aggregations.histo.buckets.2.key_as_string: "2016-03-01T00:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 1 }

- do:
search:
body:
size: 0
aggs:
histo:
date_histogram:
field: date.nanos
calendar_interval: month
- match: { hits.total.value: 4 }
- length: { aggregations.histo.buckets: 3 }
- match: { aggregations.histo.buckets.0.key_as_string: "2016-01-01T00:00:00.000Z" }
@ -410,3 +430,63 @@ setup:
|
|||
- match: { aggregations.histo.buckets.1.doc_count: 2 }
|
||||
- match: { aggregations.histo.buckets.2.key_as_string: "2016-02-02T00:00:00.000Z" }
|
||||
- match: { aggregations.histo.buckets.2.doc_count: 1 }
|
||||
|
||||
---
|
||||
"date_histogram with pre-epoch daylight savings time transition":
|
||||
- skip:
|
||||
version: " - 7.6.1"
|
||||
reason: bug fixed in 7.6.1.
|
||||
# Add date_nanos to the mapping. We couldn't do it during setup because that
|
||||
# is run against 6.8 which doesn't have date_nanos
|
||||
- do:
|
||||
indices.put_mapping:
|
||||
index: test_1
|
||||
body:
|
||||
properties:
|
||||
number:
|
||||
type: integer
|
||||
date:
|
||||
type: date
|
||||
fields:
|
||||
nanos:
|
||||
type: date_nanos
|
||||
|
||||
- do:
|
||||
bulk:
|
||||
index: test_1
|
||||
refresh: true
|
||||
body:
|
||||
- '{"index": {}}'
|
||||
- '{"date": "2016-01-01"}'
|
||||
|
||||
- do:
|
||||
search:
|
||||
body:
|
||||
size: 0
|
||||
aggs:
|
||||
histo:
|
||||
date_histogram:
|
||||
field: date
|
||||
fixed_interval: 1ms
|
||||
time_zone: America/Phoenix
|
||||
|
||||
- match: { hits.total.value: 1 }
|
||||
- length: { aggregations.histo.buckets: 1 }
|
||||
- match: { aggregations.histo.buckets.0.key_as_string: "2015-12-31T17:00:00.000-07:00" }
|
||||
- match: { aggregations.histo.buckets.0.doc_count: 1 }
|
||||
|
||||
- do:
|
||||
search:
|
||||
body:
|
||||
size: 0
|
||||
aggs:
|
||||
histo:
|
||||
date_histogram:
|
||||
field: date.nanos
|
||||
fixed_interval: 1ms
|
||||
time_zone: America/Phoenix
|
||||
|
||||
- match: { hits.total.value: 1 }
|
||||
- length: { aggregations.histo.buckets: 1 }
|
||||
- match: { aggregations.histo.buckets.0.key_as_string: "2015-12-31T17:00:00.000-07:00" }
|
||||
- match: { aggregations.histo.buckets.0.doc_count: 1 }
|
||||
|
|
|
@ -775,7 +775,7 @@ setup:
---
"Mixed ip and unmapped fields":
- skip:
version: " - 7.99.99"
version: " - 7.5.99"
reason: This will fail against 7.x until the fix is backported there
# It is important that the index *without* the ip field be sorted *before*
# the index *with* the ip field because that has caused bugs in the past.
@ -821,8 +821,8 @@ setup:
---
"date_histogram with time_zone":
- skip:
version: " - 7.99.99"
reason: This will fail against 7.whatever until we backport the fix
version: " - 7.6.0"
reason: Fixed in 7.6.0
- do:
index:
index: test
@ -316,8 +316,8 @@ setup:
---
"sub aggs":
- skip:
version: " - 7.99.99"
reason: Sub aggs fixed in 8.0 (to be backported to 7.6.1)
version: " - 7.6.1"
reason: Sub aggs fixed in 7.6.1

- do:
index:
@ -1,7 +1,7 @@
setup:
- skip:
version: " - 7.99.99" #TODO change this after backport
reason: These new error messages were added in 7.1
version: " - 7.1.99"
reason: These new error messages were added in 7.2

- do:
indices.create:
@ -141,7 +141,7 @@
---
'Misspelled fields get "did you mean"':
- skip:
version: " - 7.99.99"
version: " - 7.6.99"
reason: Implemented in 8.0 (to be backported to 7.7)
- do:
catch: /\[significant_terms\] unknown field \[jlp\] did you mean \[jlh\]\?/
@ -0,0 +1,330 @@
|
|||
---
|
||||
setup:
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: test
|
||||
body:
|
||||
mappings:
|
||||
properties:
|
||||
text:
|
||||
type: text
|
||||
analyzer: standard
|
||||
fields:
|
||||
raw:
|
||||
type: keyword
|
||||
nested1:
|
||||
type: nested
|
||||
|
||||
- do:
|
||||
bulk:
|
||||
refresh: true
|
||||
body:
|
||||
- '{"index": {"_index": "test", "_id": "1"}}'
|
||||
- '{"text" : "Some like it hot, some like it cold", "nested1": [{"foo": "bar1"}]}'
|
||||
- '{"index": {"_index": "test", "_id": "2"}}'
|
||||
- '{"text" : "Its cold outside, theres no kind of atmosphere", "nested1": [{"foo": "bar2"}]}'
|
||||
- '{"index": {"_index": "test", "_id": "3"}}'
|
||||
- '{"text" : "Baby its cold there outside", "nested1": [{"foo": "bar3"}]}'
|
||||
- '{"index": {"_index": "test", "_id": "4"}}'
|
||||
- '{"text" : "Outside it is cold and wet", "nested1": [{"foo": "bar4"}]}'
|
||||
|
||||
---
|
||||
teardown:
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: null
|
||||
|
||||
---
|
||||
"Test disallow expensive queries":
|
||||
- skip:
|
||||
version: " - 7.6.99"
|
||||
reason: "implemented in 7.7.0"
|
||||
|
||||
### Check for initial setting = null -> false
|
||||
- do:
|
||||
cluster.get_settings:
|
||||
flat_settings: true
|
||||
|
||||
- match: {search.allow_expensive_queries: null}
|
||||
|
||||
### Prefix
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
prefix:
|
||||
text:
|
||||
value: out
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
### Fuzzy
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
fuzzy:
|
||||
text:
|
||||
value: outwide
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
|
||||
### Regexp
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
regexp:
|
||||
text:
|
||||
value: .*ou.*id.*
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
### Wildcard
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
wildcard:
|
||||
text:
|
||||
value: out?ide
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
### Range on text
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
range:
|
||||
text:
|
||||
gte: "theres"
|
||||
|
||||
- match: { hits.total.value: 2 }
|
||||
|
||||
### Range on keyword
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
range:
|
||||
text.raw:
|
||||
gte : "Outside it is cold and wet"
|
||||
|
||||
- match: { hits.total.value: 2 }
|
||||
|
||||
### Nested
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
nested:
|
||||
path: "nested1"
|
||||
query:
|
||||
bool:
|
||||
must: [{"match": {"nested1.foo": "bar2"}}]
|
||||
|
||||
- match: { hits.total.value: 1 }
|
||||
|
||||
### Update setting to false
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: "false"
|
||||
flat_settings: true
|
||||
|
||||
- match: {transient: {search.allow_expensive_queries: "false"}}
|
||||
|
||||
### Prefix
|
||||
- do:
|
||||
catch: /\[prefix\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false. For optimised prefix queries on text fields please enable \[index_prefixes\]./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
prefix:
|
||||
text:
|
||||
value: out
|
||||
|
||||
### Fuzzy
|
||||
- do:
|
||||
catch: /\[fuzzy\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
fuzzy:
|
||||
text:
|
||||
value: outwide
|
||||
|
||||
### Regexp
|
||||
- do:
|
||||
catch: /\[regexp\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
regexp:
|
||||
text:
|
||||
value: .*ou.*id.*
|
||||
|
||||
### Wildcard
|
||||
- do:
|
||||
catch: /\[wildcard\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
wildcard:
|
||||
text:
|
||||
value: out?ide
|
||||
|
||||
### Range on text
|
||||
- do:
|
||||
catch: /\[range\] queries on \[text\] or \[keyword\] fields cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
range:
|
||||
text:
|
||||
gte: "theres"
|
||||
|
||||
### Range on keyword
|
||||
- do:
|
||||
catch: /\[range\] queries on \[text\] or \[keyword\] fields cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
range:
|
||||
text.raw:
|
||||
gte : "Outside it is cold and wet"
|
||||
|
||||
### Nested
|
||||
- do:
|
||||
catch: /\[joining\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
nested:
|
||||
path: "nested1"
|
||||
query:
|
||||
bool:
|
||||
must: [{"match" : {"nested1.foo" : "bar2"}}]
|
||||
|
||||
### Revert setting to true
|
||||
- do:
|
||||
cluster.put_settings:
|
||||
body:
|
||||
transient:
|
||||
search.allow_expensive_queries: "true"
|
||||
flat_settings: true
|
||||
|
||||
- match: {transient: {search.allow_expensive_queries: "true"}}
|
||||
|
||||
### Prefix
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
prefix:
|
||||
text:
|
||||
value: out
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
### Fuzzy
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
fuzzy:
|
||||
text:
|
||||
value: outwide
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
### Regexp
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
regexp:
|
||||
text:
|
||||
value: .*ou.*id.*
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
### Wildcard
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
wildcard:
|
||||
text:
|
||||
value: out?ide
|
||||
|
||||
- match: { hits.total.value: 3 }
|
||||
|
||||
### Range on text
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
range:
|
||||
text:
|
||||
gte: "theres"
|
||||
|
||||
- match: { hits.total.value: 2 }
|
||||
|
||||
### Range on keyword
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
range:
|
||||
text.raw:
|
||||
gte: "Outside it is cold and wet"
|
||||
|
||||
- match: { hits.total.value: 2 }
|
||||
|
||||
### Nested
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
query:
|
||||
nested:
|
||||
path: "nested1"
|
||||
query:
|
||||
bool:
|
||||
must: [{"match": {"nested1.foo": "bar2"}}]
|
||||
|
||||
- match: { hits.total.value: 1 }
|
|
@ -41,8 +41,8 @@ setup:
---
"Create a snapshot and clean up repository":
- skip:
version: " - 7.99.99"
reason: cleanup introduced in 8.0
version: " - 7.3.99"
reason: cleanup introduced in 7.4

- do:
snapshot.cleanup_repository:
@ -157,8 +157,8 @@ setup:
---
"Get snapshot info with metadata":
- skip:
version: " - 7.9.99"
reason: "https://github.com/elastic/elasticsearch/pull/41281 not yet backported to 7.x"
version: " - 7.2.99"
reason: "Introduced with 7.3"

- do:
indices.create:
@ -98,6 +98,7 @@ class LoggingOutputStream extends OutputStream {
}
if (used == 0) {
// only windows \r was in the buffer
buffer.used = 0;
return;
}
log(new String(buffer.bytes, 0, used, StandardCharsets.UTF_8));
@ -450,6 +450,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
SearchService.MAX_KEEPALIVE_SETTING,
SearchService.ALLOW_EXPENSIVE_QUERIES,
MultiBucketConsumerService.MAX_BUCKET_SETTING,
SearchService.LOW_LEVEL_CANCELLATION_SETTING,
SearchService.MAX_OPEN_SCROLL_CONTEXT,
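For context, this is roughly how a dynamic boolean cluster setting of this kind is declared and then handed to the index layer as a BooleanSupplier. The real declaration lives in SearchService and is not shown in this diff, so the default value and properties below are assumptions, and the wiring class itself is hypothetical; it only illustrates the BooleanSupplier plumbing that the IndexModule and IndexService changes further down rely on.

import java.util.function.BooleanSupplier;

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class AllowExpensiveQueriesWiring {
    // Assumed shape of SearchService.ALLOW_EXPENSIVE_QUERIES: a dynamic,
    // node-scoped boolean that defaults to allowing expensive queries.
    static final Setting<Boolean> ALLOW_EXPENSIVE_QUERIES =
        Setting.boolSetting("search.allow_expensive_queries", true, Property.Dynamic, Property.NodeScope);

    private volatile boolean allowExpensiveQueries = true;

    AllowExpensiveQueriesWiring(ClusterSettings clusterSettings) {
        // Keep a volatile flag in sync with the dynamic cluster setting.
        clusterSettings.addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, v -> allowExpensiveQueries = v);
    }

    // Hand the index layer a supplier, matching the BooleanSupplier parameters
    // added to IndexModule and IndexService in this commit.
    BooleanSupplier asSupplier() {
        return () -> allowExpensiveQueries;
    }
}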
@ -208,7 +208,7 @@ public class DateUtils {
return ZoneId.of(zoneId).normalized();
}

private static final Instant MAX_NANOSECOND_INSTANT = Instant.parse("2262-04-11T23:47:16.854775807Z");
static final Instant MAX_NANOSECOND_INSTANT = Instant.parse("2262-04-11T23:47:16.854775807Z");

static final long MAX_NANOSECOND_IN_MILLIS = MAX_NANOSECOND_INSTANT.toEpochMilli();
@ -231,6 +231,26 @@ public class DateUtils {
return instant.getEpochSecond() * 1_000_000_000 + instant.getNano();
}

/**
* Returns an instant that is within valid nanosecond resolution. If
* the parameter is before the valid nanosecond range then this returns
* the minimum {@linkplain Instant} valid for nanosecond resolution. If
* the parameter is after the valid nanosecond range then this returns
* the maximum {@linkplain Instant} valid for nanosecond resolution.
* <p>
* Useful for checking if all values for the field are within some range,
* even if the range's endpoints are not valid nanosecond resolution.
*/
public static Instant clampToNanosRange(Instant instant) {
if (instant.isBefore(Instant.EPOCH)) {
return Instant.EPOCH;
}
if (instant.isAfter(MAX_NANOSECOND_INSTANT)) {
return MAX_NANOSECOND_INSTANT;
}
return instant;
}
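// A quick, illustrative sketch of the clamping behaviour added above, using plain
// java.time values. The package of DateUtils (org.elasticsearch.common.time in 7.x)
// is an assumption, since it is not shown in this hunk.
import java.time.Instant;

public class ClampSketch {
    public static void main(String[] args) {
        Instant early = Instant.parse("1969-12-31T23:59:59Z"); // before the epoch
        Instant late = Instant.parse("2300-01-01T00:00:00Z");  // past the nanosecond range
        Instant ok = Instant.parse("2020-01-01T00:00:00Z");    // already valid

        System.out.println(DateUtils.clampToNanosRange(early)); // 1970-01-01T00:00:00Z
        System.out.println(DateUtils.clampToNanosRange(late));  // 2262-04-11T23:47:16.854775807Z
        System.out.println(DateUtils.clampToNanosRange(ok));    // 2020-01-01T00:00:00Z
    }
}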

/**
* convert a long value to a java time instant
* the long value resembles the nanoseconds since the epoch
@ -130,6 +130,7 @@ public final class IndexModule {
private final List<SearchOperationListener> searchOperationListeners = new ArrayList<>();
private final List<IndexingOperationListener> indexOperationListeners = new ArrayList<>();
private final AtomicBoolean frozen = new AtomicBoolean(false);
private final BooleanSupplier allowExpensiveQueries;

/**
* Construct the index module for the index with the specified index settings. The index module contains extension points for plugins
@ -144,13 +145,15 @@
final IndexSettings indexSettings,
final AnalysisRegistry analysisRegistry,
final EngineFactory engineFactory,
final Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories) {
final Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories,
final BooleanSupplier allowExpensiveQueries) {
this.indexSettings = indexSettings;
this.analysisRegistry = analysisRegistry;
this.engineFactory = Objects.requireNonNull(engineFactory);
this.searchOperationListeners.add(new SearchSlowLog(indexSettings));
this.indexOperationListeners.add(new IndexingSlowLog(indexSettings));
this.directoryFactories = Collections.unmodifiableMap(directoryFactories);
this.allowExpensiveQueries = allowExpensiveQueries;
}

/**
@ -424,7 +427,7 @@
new SimilarityService(indexSettings, scriptService, similarities), shardStoreDeleter, indexAnalyzers,
engineFactory, circuitBreakerService, bigArrays, threadPool, scriptService, clusterService, client, queryCache,
directoryFactory, eventListener, readerWrapperFactory, mapperRegistry, indicesFieldDataCache, searchOperationListeners,
indexOperationListeners, namedWriteableRegistry, idFieldDataEnabled);
indexOperationListeners, namedWriteableRegistry, idFieldDataEnabled, allowExpensiveQueries);
success = true;
return indexService;
} finally {
@ -59,8 +59,8 @@ import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.SearchIndexNameMatcher;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.SearchIndexNameMatcher;
import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
@ -127,6 +127,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
private final IndexSettings indexSettings;
private final List<SearchOperationListener> searchOperationListeners;
private final List<IndexingOperationListener> indexingOperationListeners;
private final BooleanSupplier allowExpensiveQueries;
private volatile AsyncRefreshTask refreshTask;
private volatile AsyncTranslogFSync fsyncTask;
private volatile AsyncGlobalCheckpointTask globalCheckpointTask;
@ -167,8 +168,10 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
List<SearchOperationListener> searchOperationListeners,
List<IndexingOperationListener> indexingOperationListeners,
NamedWriteableRegistry namedWriteableRegistry,
BooleanSupplier idFieldDataEnabled) {
BooleanSupplier idFieldDataEnabled,
BooleanSupplier allowExpensiveQueries) {
super(indexSettings);
this.allowExpensiveQueries = allowExpensiveQueries;
this.indexSettings = indexSettings;
this.xContentRegistry = xContentRegistry;
this.similarityService = similarityService;
@ -570,7 +573,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
return new QueryShardContext(
shardId, indexSettings, bigArrays, indexCache.bitsetFilterCache(), indexFieldData::getForField, mapperService(),
similarityService(), scriptService, xContentRegistry, namedWriteableRegistry, client, searcher, nowInMillis, clusterAlias,
indexNameMatcher);
indexNameMatcher, allowExpensiveQueries);
}

/**
@ -35,14 +35,18 @@ import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.NestedSortBuilder;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;

@ -72,6 +76,12 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
*/
SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse);

/**
* Build a sort implementation specialized for aggregations.
*/
BucketedSort newBucketedSort(BigArrays bigArrays, @Nullable Object missingValue, MultiValueMode sortMode,
Nested nested, SortOrder sortOrder, DocValueFormat format);

/**
* Clears any resources associated with this field data.
*/
@ -227,6 +237,11 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
|
|||
public Object missingValue(boolean reversed) {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {@linkplain BucketedSort} which is useful for sorting inside of aggregations.
|
||||
*/
|
||||
public abstract BucketedSort newBucketedSort(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format);
|
||||
}
|
||||
|
||||
interface Builder {
|
||||
|
@ -242,5 +257,4 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
|
|||
IndexFieldData<FD> localGlobalDirect(DirectoryReader indexReader) throws Exception;
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -29,11 +29,15 @@ import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.fielddata.AbstractSortedDocValues;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;

@@ -135,6 +139,11 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat
};
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("only supported on numeric fields");
}

/**
* A view of a SortedDocValues where missing values
* are replaced with the specified term

@@ -27,12 +27,16 @@ import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.NumericDoubleValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;

@@ -58,6 +62,18 @@ public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparato
return indexFieldData.load(context).getDoubleValues();
}

private NumericDoubleValues getNumericDocValues(LeafReaderContext context, double missingValue) throws IOException {
final SortedNumericDoubleValues values = getValues(context);
if (nested == null) {
return FieldData.replaceMissing(sortMode.select(values), missingValue);
} else {
final BitSet rootDocs = nested.rootDocs(context);
final DocIdSetIterator innerDocs = nested.innerDocs(context);
final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE;
return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren);
}
}

protected void setScorer(Scorable scorer) {}

@Override

@@ -70,17 +86,7 @@ public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparato
return new FieldComparator.DoubleComparator(numHits, null, null) {
@Override
protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
final SortedNumericDoubleValues values = getValues(context);
final NumericDoubleValues selectedValues;
if (nested == null) {
selectedValues = FieldData.replaceMissing(sortMode.select(values), dMissingValue);
} else {
final BitSet rootDocs = nested.rootDocs(context);
final DocIdSetIterator innerDocs = nested.innerDocs(context);
final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE;
selectedValues = sortMode.select(values, dMissingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren);
}
return selectedValues.getRawDoubleValues();
return DoubleValuesComparatorSource.this.getNumericDocValues(context, dMissingValue).getRawDoubleValues();
}
@Override
public void setScorer(Scorable scorer) {

@@ -88,4 +94,28 @@ public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparato
}
};
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format) {
return new BucketedSort.ForDoubles(bigArrays, sortOrder, format) {
private final double dMissingValue = (Double) missingObject(missingValue, sortOrder == SortOrder.DESC);

@Override
public Leaf forLeaf(LeafReaderContext ctx) throws IOException {
return new Leaf() {
private final NumericDoubleValues values = getNumericDocValues(ctx, dMissingValue);

@Override
protected boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}

@Override
protected double docValue() throws IOException {
return values.doubleValue();
}
};
}
};
}
}

@@ -22,15 +22,20 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.NumericDoubleValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;

@@ -52,27 +57,59 @@ public class FloatValuesComparatorSource extends IndexFieldData.XFieldComparator
return SortField.Type.FLOAT;
}

private NumericDoubleValues getNumericDocValues(LeafReaderContext context, float missingValue) throws IOException {
final SortedNumericDoubleValues values = indexFieldData.load(context).getDoubleValues();
if (nested == null) {
return FieldData.replaceMissing(sortMode.select(values), missingValue);
} else {
final BitSet rootDocs = nested.rootDocs(context);
final DocIdSetIterator innerDocs = nested.innerDocs(context);
final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE;
return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren);
}
}

@Override
public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName());

final float dMissingValue = (Float) missingObject(missingValue, reversed);
final float fMissingValue = (Float) missingObject(missingValue, reversed);
// NOTE: it's important to pass null as a missing value in the constructor so that
// the comparator doesn't check docsWithField since we replace missing values in select()
return new FieldComparator.FloatComparator(numHits, null, null) {
@Override
protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
final SortedNumericDoubleValues values = indexFieldData.load(context).getDoubleValues();
final NumericDoubleValues selectedValues;
if (nested == null) {
selectedValues = FieldData.replaceMissing(sortMode.select(values), dMissingValue);
} else {
final BitSet rootDocs = nested.rootDocs(context);
final DocIdSetIterator innerDocs = nested.innerDocs(context);
final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE;
selectedValues = sortMode.select(values, dMissingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren);
}
return selectedValues.getRawFloatValues();
return FloatValuesComparatorSource.this.getNumericDocValues(context, fMissingValue).getRawFloatValues();
}
};
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format) {
return new BucketedSort.ForFloats(bigArrays, sortOrder, format) {
private final float dMissingValue = (Float) missingObject(missingValue, sortOrder == SortOrder.DESC);

@Override
public boolean needsScores() { return false; }

@Override
public Leaf forLeaf(LeafReaderContext ctx) throws IOException {
return new Leaf() {
private final NumericDoubleValues values = getNumericDocValues(ctx, dMissingValue);

@Override
public void setScorer(Scorable scorer) {}

@Override
protected boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}

@Override
protected float docValue() throws IOException {
return (float) values.doubleValue();
}
};
}
};
}

@@ -26,12 +26,16 @@ import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.plain.SortedNumericDVIndexFieldData;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;
import java.util.function.Function;

@@ -72,30 +76,54 @@ public class LongValuesComparatorSource extends IndexFieldData.XFieldComparatorS
}
return converter != null ? converter.apply(values) : values;
}

private NumericDocValues getNumericDocValues(LeafReaderContext context, long missingValue) throws IOException {
final SortedNumericDocValues values = loadDocValues(context);
if (nested == null) {
return FieldData.replaceMissing(sortMode.select(values), missingValue);
}
final BitSet rootDocs = nested.rootDocs(context);
final DocIdSetIterator innerDocs = nested.innerDocs(context);
final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE;
return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren);
}

@Override
public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName());

final Long dMissingValue = (Long) missingObject(missingValue, reversed);
final long lMissingValue = (Long) missingObject(missingValue, reversed);
// NOTE: it's important to pass null as a missing value in the constructor so that
// the comparator doesn't check docsWithField since we replace missing values in select()
return new FieldComparator.LongComparator(numHits, null, null) {
@Override
protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
final SortedNumericDocValues values = loadDocValues(context);
final NumericDocValues selectedValues;
if (nested == null) {
selectedValues = FieldData.replaceMissing(sortMode.select(values), dMissingValue);
} else {
final BitSet rootDocs = nested.rootDocs(context);
final DocIdSetIterator innerDocs = nested.innerDocs(context);
final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE;
selectedValues = sortMode.select(values, dMissingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren);
}
return selectedValues;
return LongValuesComparatorSource.this.getNumericDocValues(context, lMissingValue);
}
};
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format) {
return new BucketedSort.ForLongs(bigArrays, sortOrder, format) {
private final long lMissingValue = (Long) missingObject(missingValue, sortOrder == SortOrder.DESC);

@Override
public Leaf forLeaf(LeafReaderContext ctx) throws IOException {
return new Leaf() {
private final NumericDocValues values = getNumericDocValues(ctx, lMissingValue);

@Override
protected boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}

@Override
protected long docValue() throws IOException {
return values.longValue();
}
};
}
};
}
}

@@ -26,6 +26,7 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;

@@ -33,7 +34,10 @@ import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.N
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;
import java.io.UncheckedIOException;

@@ -102,6 +106,12 @@ public final class GlobalOrdinalsIndexFieldData extends AbstractIndexComponent i
throw new UnsupportedOperationException("no global ordinals sorting yet");
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("only supported on numeric fields");
}

@Override
public void clear() {}

@@ -186,6 +196,12 @@ public final class GlobalOrdinalsIndexFieldData extends AbstractIndexComponent i
throw new UnsupportedOperationException("no global ordinals sorting yet");
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("only supported on numeric fields");
}

@Override
public void clear() {}

@@ -25,16 +25,21 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

public abstract class AbstractLatLonPointDVIndexFieldData extends DocValuesIndexFieldData
implements IndexGeoPointFieldData {

@@ -48,6 +53,12 @@ public abstract class AbstractLatLonPointDVIndexFieldData extends DocValuesIndex
throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
}

public static class LatLonPointDVIndexFieldData extends AbstractLatLonPointDVIndexFieldData {
public LatLonPointDVIndexFieldData(Index index, String fieldName) {
super(index, fieldName);

@@ -24,10 +24,15 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.search.SortedSetSelector;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

public class BinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData<BinaryDVAtomicFieldData> {

@@ -64,4 +69,10 @@ public class BinaryDVIndexFieldData extends DocValuesIndexFieldData implements I
SortedSetSortField.STRING_LAST : SortedSetSortField.STRING_FIRST);
return sortField;
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("only supported on numeric fields");
}
}

@@ -23,6 +23,7 @@ import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -31,7 +32,10 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;

@@ -46,6 +50,12 @@ public class BytesBinaryDVIndexFieldData extends DocValuesIndexFieldData impleme
throw new IllegalArgumentException("can't sort on binary field");
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("can't sort on binary field");
}

@Override
public BytesBinaryDVAtomicFieldData load(LeafReaderContext context) {
try {

@@ -28,18 +28,23 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AbstractSortedDocValues;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;
import java.util.Collection;

@@ -158,6 +163,12 @@ public class ConstantIndexFieldData extends AbstractIndexOrdinalsFieldData {
return new SortField(getFieldName(), source, reverse);
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("only supported on numeric fields");
}

@Override
public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) {
return this;

@@ -33,19 +33,24 @@ import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PackedLongValues;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.fielddata.RamAccountingTermsEnum;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;

@@ -84,6 +89,12 @@ public class PagedBytesIndexFieldData extends AbstractIndexOrdinalsFieldData {
return new SortField(getFieldName(), source, reverse);
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("only supported on numeric fields");
}

@Override
public AtomicOrdinalsFieldData loadDirect(LeafReaderContext context) throws Exception {
LeafReader reader = context.reader();

@@ -31,7 +31,9 @@ import org.apache.lucene.search.SortedNumericSelector;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.time.DateUtils;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.fielddata.AbstractSortedNumericDocValues;
import org.elasticsearch.index.fielddata.AtomicNumericFieldData;

@@ -43,7 +45,10 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
import org.elasticsearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource;
import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;
import java.util.Collection;

@@ -72,42 +77,7 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple
*/
public SortField sortField(NumericType targetNumericType, Object missingValue, MultiValueMode sortMode,
Nested nested, boolean reverse) {
final XFieldComparatorSource source;
switch (targetNumericType) {
case HALF_FLOAT:
case FLOAT:
source = new FloatValuesComparatorSource(this, missingValue, sortMode, nested);
break;

case DOUBLE:
source = new DoubleValuesComparatorSource(this, missingValue, sortMode, nested);
break;

case DATE:
if (numericType == NumericType.DATE_NANOSECONDS) {
// converts date values to nanosecond resolution
source = new LongValuesComparatorSource(this, missingValue,
sortMode, nested, dvs -> convertNanosToMillis(dvs));
} else {
source = new LongValuesComparatorSource(this, missingValue, sortMode, nested);
}
break;

case DATE_NANOSECONDS:
if (numericType == NumericType.DATE) {
// converts date_nanos values to millisecond resolution
source = new LongValuesComparatorSource(this, missingValue,
sortMode, nested, dvs -> convertMillisToNanos(dvs));
} else {
source = new LongValuesComparatorSource(this, missingValue, sortMode, nested);
}
break;

default:
assert !targetNumericType.isFloatingPoint();
source = new LongValuesComparatorSource(this, missingValue, sortMode, nested);
break;
}
final XFieldComparatorSource source = comparatorSource(targetNumericType, missingValue, sortMode, nested);

/**
* Check if we can use a simple {@link SortedNumericSortField} compatible with index sorting and

@@ -146,6 +116,49 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple
return sortField(numericType, missingValue, sortMode, nested, reverse);
}

/**
* Builds a {@linkplain BucketedSort} for the {@code targetNumericType},
* casting the values if their native type doesn't match.
*/
public BucketedSort newBucketedSort(NumericType targetNumericType, BigArrays bigArrays, @Nullable Object missingValue,
MultiValueMode sortMode, Nested nested, SortOrder sortOrder, DocValueFormat format) {
return comparatorSource(targetNumericType, missingValue, sortMode, nested).newBucketedSort(bigArrays, sortOrder, format);
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, @Nullable Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
return newBucketedSort(numericType, bigArrays, missingValue, sortMode, nested, sortOrder, format);
}

private XFieldComparatorSource comparatorSource(NumericType targetNumericType, @Nullable Object missingValue, MultiValueMode sortMode,
Nested nested) {
switch (targetNumericType) {
case HALF_FLOAT:
case FLOAT:
return new FloatValuesComparatorSource(this, missingValue, sortMode, nested);
case DOUBLE:
return new DoubleValuesComparatorSource(this, missingValue, sortMode, nested);
case DATE:
if (numericType == NumericType.DATE_NANOSECONDS) {
// converts date values to nanosecond resolution
return new LongValuesComparatorSource(this, missingValue,
sortMode, nested, dvs -> convertNanosToMillis(dvs));
}
return new LongValuesComparatorSource(this, missingValue, sortMode, nested);
case DATE_NANOSECONDS:
if (numericType == NumericType.DATE) {
// converts date_nanos values to millisecond resolution
return new LongValuesComparatorSource(this, missingValue,
sortMode, nested, dvs -> convertMillisToNanos(dvs));
}
return new LongValuesComparatorSource(this, missingValue, sortMode, nested);
default:
assert !targetNumericType.isFloatingPoint();
return new LongValuesComparatorSource(this, missingValue, sortMode, nested);
}
}

@Override
public NumericType getNumericType() {
return numericType;

@@ -30,6 +30,7 @@ import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.SortedSetSortField;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;

@@ -40,7 +41,10 @@ import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparator
import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsIndexFieldData;
import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsBuilder;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.sort.BucketedSort;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;
import java.util.function.Function;

@@ -81,6 +85,12 @@ public class SortedSetDVOrdinalsIndexFieldData extends DocValuesIndexFieldData i
return sortField;
}

@Override
public BucketedSort newBucketedSort(BigArrays bigArrays, Object missingValue, MultiValueMode sortMode, Nested nested,
SortOrder sortOrder, DocValueFormat format) {
throw new IllegalArgumentException("only supported on numeric fields");
}

@Override
public AtomicOrdinalsFieldData load(LeafReaderContext context) {
return new SortedSetDVBytesAtomicFieldData(context.reader(), fieldName, scriptFunction);

@@ -88,6 +88,11 @@ public final class DateFieldMapper extends FieldMapper {
public Instant toInstant(long value) {
return Instant.ofEpochMilli(value);
}

@Override
public Instant clampToValidRange(Instant instant) {
return instant;
}
},
NANOSECONDS("date_nanos", NumericType.DATE_NANOSECONDS) {
@Override

@@ -99,6 +104,11 @@ public final class DateFieldMapper extends FieldMapper {
public Instant toInstant(long value) {
return DateUtils.toInstant(value);
}

@Override
public Instant clampToValidRange(Instant instant) {
return DateUtils.clampToNanosRange(instant);
}
};

private final String type;

@@ -117,10 +127,18 @@ public final class DateFieldMapper extends FieldMapper {
return numericType;
}

/**
* Convert an {@linkplain Instant} into a long value in this resolution.
*/
public abstract long convert(Instant instant);

/**
* Convert a long value in this resolution into an instant.
*/
public abstract Instant toInstant(long value);

public abstract Instant clampToValidRange(Instant instant);

public static Resolution ofOrdinal(int ord) {
for (Resolution resolution : values()) {
if (ord == resolution.ordinal()) {

@@ -440,9 +458,30 @@ public final class DateFieldMapper extends FieldMapper {
}
}

// This check needs to be done after fromInclusive and toInclusive
// are resolved so we can throw an exception if they are invalid
// even if there are no points in the shard
return isFieldWithinRange(reader, fromInclusive, toInclusive);
}

/**
* Return whether all values of the given {@link IndexReader} are within the range,
* outside the range or cross the range. Unlike {@link #isFieldWithinQuery} this
* accepts values that are out of the range of the {@link #resolution} of this field.
* @param fromInclusive start date, inclusive
* @param toInclusive end date, inclusive
*/
public Relation isFieldWithinRange(IndexReader reader, Instant fromInclusive, Instant toInclusive)
throws IOException {
return isFieldWithinRange(reader,
resolution.convert(resolution.clampToValidRange(fromInclusive)),
resolution.convert(resolution.clampToValidRange(toInclusive)));
}

/**
* Return whether all values of the given {@link IndexReader} are within the range,
* outside the range or cross the range.
* @param fromInclusive start date, inclusive, {@link Resolution#convert(Instant) converted} to the appropriate scale
* @param toInclusive end date, inclusive, {@link Resolution#convert(Instant) converted} to the appropriate scale
*/
private Relation isFieldWithinRange(IndexReader reader, long fromInclusive, long toInclusive) throws IOException {
if (PointValues.size(reader, name()) == 0) {
// no points, so nothing matches
return Relation.DISJOINT;

|
Some files were not shown because too many files have changed in this diff.