Merge branch 'master' into enhancement/remove_node_client_setting

commit 061f09d9a4
@@ -549,8 +549,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
public void handleException(TransportException exp) {
try {
// if we got disconnected from the node, or the node / shard is not in the right state (being closed)
if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
(isPrimaryAction && retryPrimaryException(exp.unwrapCause()))) {
final Throwable cause = exp.unwrapCause();
if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
(isPrimaryAction && retryPrimaryException(cause))) {
logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.id(), request);
retry(exp);
} else {

@@ -799,6 +800,12 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
// we may end up here if the cluster state used to route the primary is so stale that the underlying
// index shard was replaced with a replica. For example - in a two node cluster, if the primary fails
// the replica will take over and a replica will be assigned to the first node.
if (indexShard.routingEntry().primary() == false) {
throw new RetryOnPrimaryException(indexShard.shardId(), "actual shard is not a primary " + indexShard.routingEntry());
}
return IndexShardReferenceImpl.createOnPrimary(indexShard);
}
@@ -128,6 +128,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING,
BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,
IndexModule.INDEX_STORE_TYPE_SETTING,
IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING,
@@ -84,6 +84,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
public static final String DEFAULT_MAPPING = "_default_";
public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING =
Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope);
public static final Setting<Long> INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING =
Setting.longSetting("index.mapping.total_fields.limit", 1000L, 0, Property.Dynamic, Property.IndexScope);
public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING =
Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope);
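Illustrative sketch (not part of the diff): both new limits are declared as dynamic, index-scoped settings, so they can be set per index. Mirroring the test usage later in this commit, a hypothetical index could be created with a lowered total-fields limit like this:

    // "test2" is a hypothetical index name; the setting key comes from MapperService
    createIndex("test2", Settings.builder()
        .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1)
        .build());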
@@ -289,6 +291,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
// deserializing cluster state that was sent by the master node,
// this check will be skipped.
checkNestedFieldsLimit(fullPathObjectMappers);
checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
}

Set<String> parentTypes = this.parentTypes;

@@ -403,11 +406,18 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
actualNestedFields++;
}
}
if (allowedNestedFields >= 0 && actualNestedFields > allowedNestedFields) {
if (actualNestedFields > allowedNestedFields) {
throw new IllegalArgumentException("Limit of nested fields [" + allowedNestedFields + "] in index [" + index().getName() + "] has been exceeded");
}
}

private void checkTotalFieldsLimit(long totalMappers) {
long allowedTotalFields = indexSettings.getValue(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING);
if (allowedTotalFields < totalMappers) {
throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields + "] in index [" + index().getName() + "] has been exceeded");
}
}

public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException {
String defaultMappingSource;
if (PercolatorFieldMapper.TYPE_NAME.equals(mappingType)) {
@@ -135,9 +135,12 @@ public final class PercolatorQuery extends Query implements Accountable {
public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException {
Scorer scorer = scorer(leafReaderContext);
if (scorer != null) {
int result = scorer.iterator().advance(docId);
TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator();
int result = twoPhaseIterator.approximation().advance(docId);
if (result == docId) {
return Explanation.match(scorer.score(), "PercolatorQuery");
if (twoPhaseIterator.matches()) {
return Explanation.match(scorer.score(), "PercolatorQuery");
}
}
}
return Explanation.noMatch("PercolatorQuery");
@@ -974,7 +974,6 @@ public class IndexShard extends AbstractIndexShardComponent {

private void verifyPrimary() {
if (shardRouting.primary() == false) {
// must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException
throw new IllegalStateException("shard is not a primary " + shardRouting);
}
}
@@ -73,6 +73,23 @@ public final class ConvertProcessor extends AbstractProcessor {
public Object convert(Object value) {
return value.toString();
}
}, AUTO {
@Override
public Object convert(Object value) {
if (!(value instanceof String)) {
return value;
}
try {
return BOOLEAN.convert(value);
} catch (IllegalArgumentException e) { }
try {
return INTEGER.convert(value);
} catch (IllegalArgumentException e) {}
try {
return FLOAT.convert(value);
} catch (IllegalArgumentException e) {}
return value;
}
};

@Override

@@ -94,11 +111,13 @@ public final class ConvertProcessor extends AbstractProcessor {
public static final String TYPE = "convert";

private final String field;
private final String targetField;
private final Type convertType;

ConvertProcessor(String tag, String field, Type convertType) {
ConvertProcessor(String tag, String field, String targetField, Type convertType) {
super(tag);
this.field = field;
this.targetField = targetField;
this.convertType = convertType;
}

@@ -106,6 +125,10 @@ public final class ConvertProcessor extends AbstractProcessor {
return field;
}

String getTargetField() {
return targetField;
}

Type getConvertType() {
return convertType;
}

@@ -128,7 +151,7 @@ public final class ConvertProcessor extends AbstractProcessor {
} else {
newValue = convertType.convert(oldValue);
}
document.setFieldValue(field, newValue);
document.setFieldValue(targetField, newValue);
}

@Override

@@ -141,8 +164,9 @@ public final class ConvertProcessor extends AbstractProcessor {
public ConvertProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
String typeProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type");
String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", field);
Type convertType = Type.fromString(processorTag, "type", typeProperty);
return new ConvertProcessor(processorTag, field, convertType);
return new ConvertProcessor(processorTag, field, targetField, convertType);
}
}
}
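Illustrative sketch (not part of the diff): with the new optional "target_field" config key the converted value can be written to a separate field while the source field is left untouched; when the key is omitted it defaults to the value of "field". Mirroring the factory tests further down, a hypothetical configuration could look like this:

    // "price" and "price_int" are hypothetical field names
    Map<String, Object> config = new HashMap<>();
    config.put("field", "price");
    config.put("target_field", "price_int"); // new optional key, defaults to the value of "field"
    config.put("type", "integer");
    ConvertProcessor processor = new ConvertProcessor.Factory().create(config);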
@@ -19,6 +19,7 @@

package org.elasticsearch.ingest.processor;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.IngestDocument;

@@ -28,6 +29,9 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException;
import static org.elasticsearch.ingest.core.ConfigurationUtils.readStringProperty;

/**
* Processor that allows to search for patterns in field content and replace them with corresponding string replacement.
* Support fields of string type only, throws exception if a field is of a different type.

@@ -79,10 +83,15 @@ public final class GsubProcessor extends AbstractProcessor {
public static final class Factory extends AbstractProcessorFactory<GsubProcessor> {
@Override
public GsubProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
String pattern = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pattern");
String replacement = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "replacement");
Pattern searchPattern = Pattern.compile(pattern);
String field = readStringProperty(TYPE, processorTag, config, "field");
String pattern = readStringProperty(TYPE, processorTag, config, "pattern");
String replacement = readStringProperty(TYPE, processorTag, config, "replacement");
Pattern searchPattern;
try {
searchPattern = Pattern.compile(pattern);
} catch (Exception e) {
throw newConfigurationException(TYPE, processorTag, "pattern", "Invalid regex pattern. " + e.getMessage());
}
return new GsubProcessor(processorTag, field, searchPattern, replacement);
}
}
@@ -94,7 +94,10 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue {

@Override
public double metric(long owningBucketOrd) {
return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
if (valuesSource == null || owningBucketOrd >= sums.size()) {
return Double.NaN;
}
return sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
}

@Override

@@ -96,7 +96,10 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue {

@Override
public double metric(long owningBucketOrd) {
return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd);
if (valuesSource == null || owningBucketOrd >= maxes.size()) {
return Double.NEGATIVE_INFINITY;
}
return maxes.get(owningBucketOrd);
}

@Override

@@ -95,7 +95,10 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue {

@Override
public double metric(long owningBucketOrd) {
return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd);
if (valuesSource == null || owningBucketOrd >= mins.size()) {
return Double.POSITIVE_INFINITY;
}
return mins.get(owningBucketOrd);
}

@Override
@@ -128,12 +128,23 @@ public class StatsAggregator extends NumericMetricsAggregator.MultiValue {

@Override
public double metric(String name, long owningBucketOrd) {
if (valuesSource == null || owningBucketOrd >= counts.size()) {
switch(InternalStats.Metrics.resolve(name)) {
case count: return 0;
case sum: return 0;
case min: return Double.POSITIVE_INFINITY;
case max: return Double.NEGATIVE_INFINITY;
case avg: return Double.NaN;
default:
throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
}
}
switch(InternalStats.Metrics.resolve(name)) {
case count: return valuesSource == null ? 0 : counts.get(owningBucketOrd);
case sum: return valuesSource == null ? 0 : sums.get(owningBucketOrd);
case min: return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd);
case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd);
case avg: return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
case count: return counts.get(owningBucketOrd);
case sum: return sums.get(owningBucketOrd);
case min: return mins.get(owningBucketOrd);
case max: return maxes.get(owningBucketOrd);
case avg: return sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
default:
throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
}

@@ -30,6 +30,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator;
import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
@@ -140,20 +141,34 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue

@Override
public double metric(String name, long owningBucketOrd) {
if (valuesSource == null || owningBucketOrd >= counts.size()) {
switch(InternalExtendedStats.Metrics.resolve(name)) {
case count: return 0;
case sum: return 0;
case min: return Double.POSITIVE_INFINITY;
case max: return Double.NEGATIVE_INFINITY;
case avg: return Double.NaN;
case sum_of_squares: return 0;
case variance: return Double.NaN;
case std_deviation: return Double.NaN;
case std_upper: return Double.NaN;
case std_lower: return Double.NaN;
default:
throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
}
}
switch(InternalExtendedStats.Metrics.resolve(name)) {
case count: return valuesSource == null ? 0 : counts.get(owningBucketOrd);
case sum: return valuesSource == null ? 0 : sums.get(owningBucketOrd);
case min: return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd);
case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd);
case avg: return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
case sum_of_squares: return valuesSource == null ? 0 : sumOfSqrs.get(owningBucketOrd);
case variance: return valuesSource == null ? Double.NaN : variance(owningBucketOrd);
case std_deviation: return valuesSource == null ? Double.NaN : Math.sqrt(variance(owningBucketOrd));
case count: return counts.get(owningBucketOrd);
case sum: return sums.get(owningBucketOrd);
case min: return mins.get(owningBucketOrd);
case max: return maxes.get(owningBucketOrd);
case avg: return sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
case sum_of_squares: return sumOfSqrs.get(owningBucketOrd);
case variance: return variance(owningBucketOrd);
case std_deviation: return Math.sqrt(variance(owningBucketOrd));
case std_upper:
if (valuesSource == null) { return Double.NaN; }
return (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) + (Math.sqrt(variance(owningBucketOrd)) * this.sigma);
case std_lower:
if (valuesSource == null) { return Double.NaN; }
return (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) - (Math.sqrt(variance(owningBucketOrd)) * this.sigma);
default:
throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");

@@ -87,7 +87,10 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue {

@Override
public double metric(long owningBucketOrd) {
return valuesSource == null ? 0 : sums.get(owningBucketOrd);
if (valuesSource == null || owningBucketOrd >= sums.size()) {
return 0.0;
}
return sums.get(owningBucketOrd);
}

@Override
@@ -50,6 +50,8 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.shard.IndexShardNotStartedException;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;

@@ -158,7 +160,7 @@ public class TransportReplicationActionTests extends ESTestCase {
ReplicationTask task = maybeTask();

ClusterBlocks.Builder block = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
.addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();

@@ -166,7 +168,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertPhase(task, "failed");

block = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
.addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(task, new Request().timeout("5ms"), listener);

@@ -181,7 +183,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertPhase(task, "waiting_for_retry");

block = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
.addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener, ClusterBlockException.class);
assertIndexShardUninitialized();

@@ -196,7 +198,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final ShardId shardId = new ShardId(index, "_na_", 0);
// no replicas in oder to skip the replication part
setState(clusterService, state(index, true,
randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED));
randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED));
ReplicationTask task = maybeTask();

logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());

@@ -221,7 +223,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
final List<CapturingTransport.CapturedRequest> capturedRequests =
transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId);
transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId);
assertThat(capturedRequests, notNullValue());
assertThat(capturedRequests.size(), equalTo(1));
assertThat(capturedRequests.get(0).action, equalTo("testAction[p]"));
@@ -234,7 +236,7 @@ public class TransportReplicationActionTests extends ESTestCase {
* before the relocation target, there is a time span where relocation source believes active primary to be on
* relocation target and relocation target believes active primary to be on relocation source. This results in replication
* requests being sent back and forth.
*
* <p>
* This test checks that replication request is not routed back from relocation target to relocation source in case of
* stale index routing table on relocation target.
*/

@@ -271,7 +273,7 @@ public class TransportReplicationActionTests extends ESTestCase {
IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
final List<CapturingTransport.CapturedRequest> capturedRequests =
transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId);
transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId);
assertThat(capturedRequests, notNullValue());
assertThat(capturedRequests.size(), equalTo(1));
assertThat(capturedRequests.get(0).action, equalTo("testAction[p]"));

@@ -282,7 +284,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
// no replicas in oder to skip the replication part
setState(clusterService, state(index, true,
randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED));
randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(new ShardId("unknown_index", "_na_", 0)).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
@@ -299,6 +301,61 @@ public class TransportReplicationActionTests extends ESTestCase {
assertListenerThrows("must throw shard not found exception", listener, ShardNotFoundException.class);
}

public void testStalePrimaryShardOnReroute() throws InterruptedException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
// no replicas in order to skip the replication part
setState(clusterService, stateWithActivePrimary(index, true, randomInt(3)));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId);
boolean timeout = randomBoolean();
if (timeout) {
request.timeout("0s");
} else {
request.timeout("1h");
}
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();

TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();
CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear();
assertThat(capturedRequests, arrayWithSize(1));
assertThat(capturedRequests[0].action, equalTo("testAction[p]"));
assertPhase(task, "waiting_on_primary");
transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId));

if (timeout) {
// we always try at least one more time on timeout
assertThat(listener.isDone(), equalTo(false));
capturedRequests = transport.getCapturedRequestsAndClear();
assertThat(capturedRequests, arrayWithSize(1));
assertThat(capturedRequests[0].action, equalTo("testAction[p]"));
assertPhase(task, "waiting_on_primary");
transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId));
assertListenerThrows("must throw index not found exception", listener, ElasticsearchException.class);
assertPhase(task, "failed");
} else {
assertThat(listener.isDone(), equalTo(false));
// generate a CS change
setState(clusterService, clusterService.state());
capturedRequests = transport.getCapturedRequestsAndClear();
assertThat(capturedRequests, arrayWithSize(1));
assertThat(capturedRequests[0].action, equalTo("testAction[p]"));
}
}

private ElasticsearchException randomRetryPrimaryException(ShardId shardId) {
return randomFrom(
new ShardNotFoundException(shardId),
new IndexNotFoundException(shardId.getIndex()),
new IndexShardClosedException(shardId),
new EngineClosedException(shardId),
new TransportReplicationAction.RetryOnPrimaryException(shardId, "hello")
);
}

public void testRoutePhaseExecutesRequest() {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
@@ -449,7 +506,7 @@ public class TransportReplicationActionTests extends ESTestCase {
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithRelocatingReplicasAfterPrimaryOp.new PrimaryPhase(
task, request, createTransportChannel(listener));
task, request, createTransportChannel(listener));
primaryPhase.run();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
ShardRouting relocatingReplicaShard = stateWithRelocatingReplica.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0);

@@ -485,7 +542,7 @@ public class TransportReplicationActionTests extends ESTestCase {
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithDeletedIndexAfterPrimaryOp.new PrimaryPhase(
task, request, createTransportChannel(listener));
task, request, createTransportChannel(listener));
primaryPhase.run();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
assertThat("replication phase should be skipped if index gets deleted after primary operation", transport.capturedRequestsByTargetNode().size(), equalTo(0));

@@ -529,8 +586,8 @@ public class TransportReplicationActionTests extends ESTestCase {

setState(clusterService, state(index, true, ShardRoutingState.STARTED, replicaStates));
logger.debug("using consistency level of [{}], assigned shards [{}], total shards [{}]. expecting op to [{}]. using state: \n{}",
request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? "succeed" : "retry",
clusterService.state().prettyPrint());
request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? "succeed" : "retry",
clusterService.state().prettyPrint());

final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
PlainActionFuture<Response> listener = new PlainActionFuture<>();

@@ -646,7 +703,7 @@ public class TransportReplicationActionTests extends ESTestCase {

TransportChannel channel = createTransportChannel(listener, error::set);
TransportReplicationAction<Request, Request, Response>.ReplicationPhase replicationPhase =
action.new ReplicationPhase(task, request, new Response(), request.shardId(), channel, reference);
action.new ReplicationPhase(task, request, new Response(), request.shardId(), channel, reference);

assertThat(replicationPhase.totalShards(), equalTo(totalShards));
assertThat(replicationPhase.pending(), equalTo(assignedReplicas));

@@ -656,7 +713,7 @@ public class TransportReplicationActionTests extends ESTestCase {

HashMap<String, Request> nodesSentTo = new HashMap<>();
boolean executeOnReplica =
action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings());
action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings());
for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) {
// no duplicate requests
Request replicationRequest = (Request) capturedRequest.request;

@@ -819,7 +876,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final ShardId shardId = new ShardId(index, "_na_", 0);
// one replica to make sure replication is attempted
setState(clusterService, state(index, true,
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard();
indexShardRouting.set(primaryShard);
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
@@ -856,7 +913,7 @@ public class TransportReplicationActionTests extends ESTestCase {
public void testReplicasCounter() throws Exception {
final ShardId shardId = new ShardId("test", "_na_", 0);
setState(clusterService, state(shardId.getIndexName(), true,
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler();
final ReplicationTask task = maybeTask();

@@ -895,7 +952,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
setState(clusterService, state(index, true,
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("100ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();

@@ -915,7 +972,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final ShardId shardId = new ShardId(index, "_na_", 0);
boolean localPrimary = true;
setState(clusterService, state(index, localPrimary,
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
Action action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void resolveRequest(MetaData metaData, String concreteIndex, Request request) {

@@ -967,7 +1024,7 @@ public class TransportReplicationActionTests extends ESTestCase {
// publish a new cluster state
boolean localPrimaryOnRetry = randomBoolean();
setState(clusterService, state(index, localPrimaryOnRetry,
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
CapturingTransport.CapturedRequest[] primaryRetry = transport.getCapturedRequestsAndClear();

// the request should be retried

@@ -1083,8 +1140,8 @@ public class TransportReplicationActionTests extends ESTestCase {
ClusterService clusterService,
ThreadPool threadPool) {
super(settings, actionName, transportService, clusterService, null, threadPool,
new ShardStateAction(settings, clusterService, transportService, null, null, threadPool),
new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME);
new ShardStateAction(settings, clusterService, transportService, null, null, threadPool),
new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME);
}

@Override
@@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.hamcrest.CollectionAssertions;
import org.junit.Before;

@@ -145,7 +146,9 @@ public class SimpleClusterStateIT extends ESIntegTestCase {
int numberOfShards = scaledRandomIntBetween(1, cluster().numDataNodes());
// if the create index is ack'ed, then all nodes have successfully processed the cluster state
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards,
IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0,
MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE)
.addMapping("type", mapping)
.setTimeout("60s").get());
ensureGreen(); // wait for green state, so its both green, and there are no more pending events
@@ -30,15 +30,21 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Rule;
import org.junit.rules.ExpectedException;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.function.Function;
import java.util.concurrent.ExecutionException;

import static org.hamcrest.CoreMatchers.containsString;

@@ -135,4 +141,24 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING));
}

public void testTotalFieldsExceedsLimit() throws Throwable {
Function<String, String> mapping = type -> {
try {
return XContentFactory.jsonBuilder().startObject().startObject(type).startObject("properties")
.startObject("field1").field("type", "string")
.endObject().endObject().endObject().endObject().string();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
//set total number of fields to 1 to trigger an exception
try {
createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1).build())
.mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Limit of total fields [1] in index [test2] has been exceeded"));
}
}
}
@@ -125,6 +125,8 @@ public class PercolatorQueryCacheTests extends ESTestCase {
IndexWriter indexWriter = new IndexWriter(
directory,
newIndexWriterConfig(new MockAnalyzer(random()))
.setMergePolicy(NoMergePolicy.INSTANCE)
.setMaxBufferedDocs(16)
);

boolean legacyFormat = randomBoolean();

@@ -36,6 +36,7 @@ import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.PhraseQuery;

@@ -60,6 +61,7 @@ import java.util.HashMap;
import java.util.Map;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

public class PercolatorQueryTests extends ESTestCase {
@@ -147,10 +149,38 @@ public class PercolatorQueryTests extends ESTestCase {
assertThat(topDocs.totalHits, equalTo(5));
assertThat(topDocs.scoreDocs.length, equalTo(5));
assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
Explanation explanation = shardSearcher.explain(builder.build(), 0);
assertThat(explanation.isMatch(), is(true));
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));

explanation = shardSearcher.explain(builder.build(), 1);
assertThat(explanation.isMatch(), is(false));

assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
explanation = shardSearcher.explain(builder.build(), 2);
assertThat(explanation.isMatch(), is(true));
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[1].score));

assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
explanation = shardSearcher.explain(builder.build(), 3);
assertThat(explanation.isMatch(), is(true));
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score));

explanation = shardSearcher.explain(builder.build(), 4);
assertThat(explanation.isMatch(), is(false));

assertThat(topDocs.scoreDocs[3].doc, equalTo(5));
explanation = shardSearcher.explain(builder.build(), 5);
assertThat(explanation.isMatch(), is(true));
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[3].score));

explanation = shardSearcher.explain(builder.build(), 6);
assertThat(explanation.isMatch(), is(false));

assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
explanation = shardSearcher.explain(builder.build(), 7);
assertThat(explanation.isMatch(), is(true));
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[4].score));
}

public void testDuel() throws Exception {

@@ -236,11 +266,14 @@ public class PercolatorQueryTests extends ESTestCase {
new MatchAllDocsQuery()
);
TopDocs topDocs2 = shardSearcher.search(builder2.build(), 10);

assertThat(topDocs1.totalHits, equalTo(topDocs2.totalHits));
assertThat(topDocs1.scoreDocs.length, equalTo(topDocs2.scoreDocs.length));
for (int j = 0; j < topDocs1.scoreDocs.length; j++) {
assertThat(topDocs1.scoreDocs[j].doc, equalTo(topDocs2.scoreDocs[j].doc));
assertThat(topDocs1.scoreDocs[j].score, equalTo(topDocs2.scoreDocs[j].score));
Explanation explain1 = shardSearcher.explain(builder1.build(), topDocs1.scoreDocs[j].doc);
Explanation explain2 = shardSearcher.explain(builder2.build(), topDocs2.scoreDocs[j].doc);
assertThat(explain1.toHtml(), equalTo(explain2.toHtml()));
}
}
@@ -21,7 +21,6 @@ package org.elasticsearch.ingest.processor;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;

@@ -44,6 +43,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase {
ConvertProcessor convertProcessor = factory.create(config);
assertThat(convertProcessor.getTag(), equalTo(processorTag));
assertThat(convertProcessor.getField(), equalTo("field1"));
assertThat(convertProcessor.getTargetField(), equalTo("field1"));
assertThat(convertProcessor.getConvertType(), equalTo(type));
}

@@ -88,4 +88,20 @@ public class ConvertProcessorFactoryTests extends ESTestCase {
assertThat(e.getMessage(), Matchers.equalTo("[type] required property is missing"));
}
}

public void testCreateWithExplicitTargetField() throws Exception {
ConvertProcessor.Factory factory = new ConvertProcessor.Factory();
Map<String, Object> config = new HashMap<>();
ConvertProcessor.Type type = randomFrom(ConvertProcessor.Type.values());
config.put("field", "field1");
config.put("target_field", "field2");
config.put("type", type.toString());
String processorTag = randomAsciiOfLength(10);
config.put(AbstractProcessorFactory.TAG_KEY, processorTag);
ConvertProcessor convertProcessor = factory.create(config);
assertThat(convertProcessor.getTag(), equalTo(processorTag));
assertThat(convertProcessor.getField(), equalTo("field1"));
assertThat(convertProcessor.getTargetField(), equalTo("field2"));
assertThat(convertProcessor.getConvertType(), equalTo(type));
}
}
@@ -34,6 +34,7 @@ import java.util.Map;
import static org.elasticsearch.ingest.processor.ConvertProcessor.Type;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;

public class ConvertProcessorTests extends ESTestCase {

@@ -41,7 +42,7 @@ public class ConvertProcessorTests extends ESTestCase {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int randomInt = randomInt();
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomInt);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.INTEGER);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(randomInt));
}

@@ -57,7 +58,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomInt);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.INTEGER);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}

@@ -68,7 +69,7 @@ public class ConvertProcessorTests extends ESTestCase {
String value = "string-" + randomAsciiOfLengthBetween(1, 10);
ingestDocument.setFieldValue(fieldName, value);

Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.INTEGER);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");

@@ -84,7 +85,7 @@ public class ConvertProcessorTests extends ESTestCase {
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomFloat);
expectedResult.put(fieldName, randomFloat);

Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.FLOAT);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, Float.class), equalTo(randomFloat));
}

@@ -100,7 +101,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomFloat);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.FLOAT);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}

@@ -111,7 +112,7 @@ public class ConvertProcessorTests extends ESTestCase {
String value = "string-" + randomAsciiOfLengthBetween(1, 10);
ingestDocument.setFieldValue(fieldName, value);

Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.FLOAT);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");

@@ -129,7 +130,7 @@ public class ConvertProcessorTests extends ESTestCase {
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, booleanString);

Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.BOOLEAN);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, Boolean.class), equalTo(randomBoolean));
}

@@ -149,7 +150,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomBoolean);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.BOOLEAN);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}

@@ -166,7 +167,7 @@ public class ConvertProcessorTests extends ESTestCase {
}
ingestDocument.setFieldValue(fieldName, fieldValue);

Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.BOOLEAN);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");

@@ -200,7 +201,7 @@ public class ConvertProcessorTests extends ESTestCase {
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);

Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.STRING);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(expectedFieldValue));
}

@@ -236,7 +237,7 @@ public class ConvertProcessorTests extends ESTestCase {
expectedList.add(randomValueString);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.STRING);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList));
}

@@ -245,7 +246,7 @@ public class ConvertProcessorTests extends ESTestCase {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
String fieldName = RandomDocumentPicks.randomFieldName(random());
Type type = randomFrom(Type.values());
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, type);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, type);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");

@@ -257,7 +258,7 @@ public class ConvertProcessorTests extends ESTestCase {
public void testConvertNullField() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null));
Type type = randomFrom(Type.values());
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", type);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", type);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed");
@@ -265,4 +266,81 @@ public class ConvertProcessorTests extends ESTestCase {
assertThat(e.getMessage(), equalTo("Field [field] is null, cannot be converted to type [" + type + "]"));
}
}

public void testAutoConvertNotString() throws Exception {
Object randomValue;
switch(randomIntBetween(0, 2)) {
case 0:
float randomFloat = randomFloat();
randomValue = randomFloat;
break;
case 1:
int randomInt = randomInt();
randomValue = randomInt;
break;
case 2:
boolean randomBoolean = randomBoolean();
randomValue = randomBoolean;
break;
default:
throw new UnsupportedOperationException();
}
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomValue));
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, sameInstance(randomValue));
}

public void testAutoConvertStringNotMatched() throws Exception {
String value = "notAnIntFloatOrBool";
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", value));
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, sameInstance(value));
}

public void testAutoConvertMatchBoolean() throws Exception {
boolean randomBoolean = randomBoolean();
String booleanString = Boolean.toString(randomBoolean);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(),
Collections.singletonMap("field", booleanString));
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, equalTo(randomBoolean));
}

public void testAutoConvertMatchInteger() throws Exception {
int randomInt = randomInt();
String randomString = Integer.toString(randomInt);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString));
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, equalTo(randomInt));
}

public void testAutoConvertMatchFloat() throws Exception {
float randomFloat = randomFloat();
String randomString = Float.toString(randomFloat);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString));
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO);
processor.execute(ingestDocument);
Object convertedValue = ingestDocument.getFieldValue("field", Object.class);
assertThat(convertedValue, equalTo(randomFloat));
}

public void testTargetField() throws Exception {
IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>());
int randomInt = randomInt();
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, String.valueOf(randomInt));
String targetField = fieldName + randomAsciiOfLength(5);
Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, targetField, Type.INTEGER);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt)));
assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt));

}
}
|
@ -84,4 +84,18 @@ public class GsubProcessorFactoryTests extends ESTestCase {
|
|||
assertThat(e.getMessage(), equalTo("[replacement] required property is missing"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testCreateInvalidPattern() throws Exception {
|
||||
GsubProcessor.Factory factory = new GsubProcessor.Factory();
|
||||
Map<String, Object> config = new HashMap<>();
|
||||
config.put("field", "field1");
|
||||
config.put("pattern", "[");
|
||||
config.put("replacement", "-");
|
||||
try {
|
||||
factory.create(config);
|
||||
fail("factory create should have failed");
|
||||
} catch(ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), equalTo("[pattern] Invalid regex pattern. Unclosed character class near index 0\n[\n^"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,8 +31,11 @@ import org.elasticsearch.script.ScriptEngineService;
|
|||
import org.elasticsearch.script.ScriptModule;
|
||||
import org.elasticsearch.script.ScriptService.ScriptType;
|
||||
import org.elasticsearch.script.SearchScript;
|
||||
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
|
||||
import org.elasticsearch.search.aggregations.bucket.global.Global;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
|
||||
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
|
||||
import org.elasticsearch.search.aggregations.metrics.avg.Avg;
|
||||
import org.elasticsearch.search.lookup.LeafSearchLookup;
|
||||
import org.elasticsearch.search.lookup.SearchLookup;
|
||||
|
@ -47,9 +50,12 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
@ -317,6 +323,36 @@ public class AvgIT extends AbstractNumericTestCase {
|
|||
assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void testOrderByEmptyAggregation() throws Exception {
|
||||
SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
|
||||
.addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>avg", true)))
|
||||
.subAggregation(filter("filter", termQuery("value", 100)).subAggregation(avg("avg").field("value"))))
|
||||
.get();
|
||||
|
||||
assertHitCount(searchResponse, 10);
|
||||
|
||||
Terms terms = searchResponse.getAggregations().get("terms");
|
||||
assertThat(terms, notNullValue());
|
||||
List<Terms.Bucket> buckets = terms.getBuckets();
|
||||
assertThat(buckets, notNullValue());
|
||||
assertThat(buckets.size(), equalTo(10));
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
Terms.Bucket bucket = buckets.get(i);
|
||||
assertThat(bucket, notNullValue());
|
||||
assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
|
||||
assertThat(bucket.getDocCount(), equalTo(1L));
|
||||
Filter filter = bucket.getAggregations().get("filter");
|
||||
assertThat(filter, notNullValue());
|
||||
assertThat(filter.getDocCount(), equalTo(0L));
|
||||
Avg avg = filter.getAggregations().get("avg");
|
||||
assertThat(avg, notNullValue());
|
||||
assertThat(avg.value(), equalTo(Double.NaN));
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Mock plugin for the {@link ExtractFieldScriptEngine}
|
||||
*/
|
||||
|
|
|
@ -31,8 +31,11 @@ import org.elasticsearch.script.ScriptEngineService;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.search.lookup.LeafSearchLookup;
import org.elasticsearch.search.lookup.SearchLookup;

@ -47,9 +50,12 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

@ -312,6 +318,36 @@ public class SumIT extends AbstractNumericTestCase {
        assertThat(sum.getValue(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12));
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>sum", true)))
                        .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            Sum sum = filter.getAggregations().get("sum");
            assertThat(sum, notNullValue());
            assertThat(sum.value(), equalTo(0.0));
        }
    }

    /**
     * Mock plugin for the {@link ExtractFieldScriptEngine}
     */

@ -45,7 +45,7 @@ include::cluster/nodes-stats.asciidoc[]

include::cluster/nodes-info.asciidoc[]

include::cluster/nodes-task.asciidoc[]
include::cluster/tasks.asciidoc[]

include::cluster/nodes-hot-threads.asciidoc[]

@ -1,49 +0,0 @@
[[nodes-task]]
== Nodes Task API

The nodes task management API retrieves information about the tasks currently
executing on one or more nodes in the cluster.

[source,js]
--------------------------------------------------
GET /_tasks <1>
GET /_tasks/nodeId1,nodeId2 <2>
GET /_tasks/nodeId1,nodeId2/cluster:* <3>
--------------------------------------------------
// AUTOSENSE

<1> Retrieves all tasks currently running on all nodes in the cluster.
<2> Retrieves all tasks running on nodes `nodeId1` and `nodeId2`. See <<cluster-nodes>> for more info about how to select individual nodes.
<3> Retrieves all cluster-related tasks running on nodes `nodeId1` and `nodeId2`.

The result will look similar to the following:

[source,js]
--------------------------------------------------
{
  "nodes": {
    "fDlEl7PrQi6F-awHZ3aaDw": {
      "name": "Gazer",
      "transport_address": "127.0.0.1:9300",
      "host": "127.0.0.1",
      "ip": "127.0.0.1:9300",
      "tasks": [
        {
          "node": "fDlEl7PrQi6F-awHZ3aaDw",
          "id": 105,
          "type": "transport",
          "action": "cluster:monitor/nodes/tasks"
        },
        {
          "node": "fDlEl7PrQi6F-awHZ3aaDw",
          "id": 106,
          "type": "direct",
          "action": "cluster:monitor/nodes/tasks[n]",
          "parent_node": "fDlEl7PrQi6F-awHZ3aaDw",
          "parent_id": 105
        }
      ]
    }
  }
}
--------------------------------------------------

@ -5,6 +5,11 @@ The pending cluster tasks API returns a list of any cluster-level changes
(e.g. create index, update mapping, allocate or fail shard) which have not yet
been executed.

NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the
<<tasks,Task Management API>> which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create
index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task
might be reported by both the Task Management API and the pending cluster tasks API.

[source,js]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/_cluster/pending_tasks'

@ -0,0 +1,103 @@
[[tasks]]
== Task Management API

experimental[The Task Management API is new and should still be considered experimental. The API may change in ways that are not backwards compatible]

[float]
=== Current Tasks Information

The task management API allows you to retrieve information about the tasks currently
executing on one or more nodes in the cluster.

[source,js]
--------------------------------------------------
GET /_tasks <1>
GET /_tasks?nodes=nodeId1,nodeId2 <2>
GET /_tasks?nodes=nodeId1,nodeId2&actions=cluster:* <3>
--------------------------------------------------
// AUTOSENSE

<1> Retrieves all tasks currently running on all nodes in the cluster.
<2> Retrieves all tasks running on nodes `nodeId1` and `nodeId2`. See <<cluster-nodes>> for more info about how to select individual nodes.
<3> Retrieves all cluster-related tasks running on nodes `nodeId1` and `nodeId2`.

The result will look similar to the following:

[source,js]
--------------------------------------------------
{
  "nodes" : {
    "oTUltX4IQMOUUVeiohTt8A" : {
      "name" : "Tamara Rahn",
      "transport_address" : "127.0.0.1:9300",
      "host" : "127.0.0.1",
      "ip" : "127.0.0.1:9300",
      "tasks" : {
        "oTUltX4IQMOUUVeiohTt8A:124" : {
          "node" : "oTUltX4IQMOUUVeiohTt8A",
          "id" : 124,
          "type" : "direct",
          "action" : "cluster:monitor/tasks/lists[n]",
          "start_time_in_millis" : 1458585884904,
          "running_time_in_nanos" : 47402,
          "parent_task_id" : "oTUltX4IQMOUUVeiohTt8A:123"
        },
        "oTUltX4IQMOUUVeiohTt8A:123" : {
          "node" : "oTUltX4IQMOUUVeiohTt8A",
          "id" : 123,
          "type" : "transport",
          "action" : "cluster:monitor/tasks/lists",
          "start_time_in_millis" : 1458585884904,
          "running_time_in_nanos" : 236042
        }
      }
    }
  }
}
--------------------------------------------------

It is also possible to retrieve information for a particular task, or all children of a particular
task, using the following two commands:

[source,js]
--------------------------------------------------
GET /_tasks/taskId1
GET /_tasks?parent_task_id=parentTaskId1
--------------------------------------------------
// AUTOSENSE

The task API can also be used to wait for completion of a particular task. The following call will
block for 10 seconds or until the task with id `oTUltX4IQMOUUVeiohTt8A:12345` is completed.

[source,js]
--------------------------------------------------
GET /_tasks/oTUltX4IQMOUUVeiohTt8A:12345?wait_for_completion=true&timeout=10s
--------------------------------------------------
// AUTOSENSE

[float]
=== Task Cancellation

If a long-running task supports cancellation, it can be cancelled by the following command:

[source,js]
--------------------------------------------------
POST /_tasks/taskId1/_cancel
--------------------------------------------------
// AUTOSENSE

The task cancellation command supports the same task selection parameters as the list tasks command, so multiple tasks
can be cancelled at the same time. For example, the following command will cancel all reindex tasks running on the
nodes `nodeId1` and `nodeId2`.

[source,js]
--------------------------------------------------
POST /_tasks/_cancel?node_id=nodeId1,nodeId2&actions=*reindex
--------------------------------------------------
// AUTOSENSE

@ -1,6 +1,8 @@
[[docs-reindex]]
== Reindex API

experimental[The reindex API is new and should still be considered experimental. The API may change in ways that are not backwards compatible]

The most basic form of `_reindex` just copies documents from one index to another.
This will copy documents from the `twitter` index into the `new_twitter` index:

@ -388,7 +390,7 @@ from aborting the operation.
=== Works with the Task API

While Reindex is running you can fetch its status using the
<<nodes-task,Nodes Task API>>:
<<tasks,Task API>>:

[source,js]
--------------------------------------------------

@ -411,24 +413,26 @@ The responses looks like:
    "testattr" : "test",
    "portsfile" : "true"
  },
  "tasks" : [ {
    "node" : "r1A2WoRbTwKZ516z6NEs5A",
    "id" : 36619,
    "type" : "transport",
    "action" : "indices:data/write/reindex",
    "status" : { <1>
      "total" : 6154,
      "updated" : 3500,
      "created" : 0,
      "deleted" : 0,
      "batches" : 36,
      "version_conflicts" : 0,
      "noops" : 0,
      "retries": 0,
      "throttled_millis": 0
    },
    "description" : ""
  } ]
  "tasks" : {
    "r1A2WoRbTwKZ516z6NEs5A:36619" : {
      "node" : "r1A2WoRbTwKZ516z6NEs5A",
      "id" : 36619,
      "type" : "transport",
      "action" : "indices:data/write/reindex",
      "status" : { <1>
        "total" : 6154,
        "updated" : 3500,
        "created" : 0,
        "deleted" : 0,
        "batches" : 36,
        "version_conflicts" : 0,
        "noops" : 0,
        "retries": 0,
        "throttled_millis": 0
      },
      "description" : ""
    }
  }
}
}
}

@ -1,6 +1,8 @@
[[docs-update-by-query]]
== Update By Query API

experimental[The update-by-query API is new and should still be considered experimental. The API may change in ways that are not backwards compatible]

The simplest usage of `_update_by_query` just performs an update on every
document in the index without changing the source. This is useful to
<<picking-up-a-new-property,pick up a new property>> or some other online

@ -233,11 +235,11 @@ from aborting the operation.
=== Works with the Task API

While Update By Query is running you can fetch its status using the
<<nodes-task,Nodes Task API>>:
<<tasks,Task API>>:

[source,js]
--------------------------------------------------
POST /_tasks/?pretty&detailed=true&action=byquery
POST /_tasks/?pretty&detailed=true&action=*byquery
--------------------------------------------------
// AUTOSENSE

@ -256,24 +258,26 @@ The responses looks like:
    "testattr" : "test",
    "portsfile" : "true"
  },
  "tasks" : [ {
    "node" : "r1A2WoRbTwKZ516z6NEs5A",
    "id" : 36619,
    "type" : "transport",
    "action" : "indices:data/write/update/byquery",
    "status" : { <1>
      "total" : 6154,
      "updated" : 3500,
      "created" : 0,
      "deleted" : 0,
      "batches" : 36,
      "version_conflicts" : 0,
      "noops" : 0,
      "retries": 0,
      "throttled_millis": 0
    },
    "description" : ""
  } ]
  "tasks" : {
    "r1A2WoRbTwKZ516z6NEs5A:36619" : {
      "node" : "r1A2WoRbTwKZ516z6NEs5A",
      "id" : 36619,
      "type" : "transport",
      "action" : "indices:data/write/update/byquery",
      "status" : { <1>
        "total" : 6154,
        "updated" : 3500,
        "created" : 0,
        "deleted" : 0,
        "batches" : 36,
        "version_conflicts" : 0,
        "noops" : 0,
        "retries": 0,
        "throttled_millis": 0
      },
      "description" : ""
    }
  }
}
}
}

@ -668,18 +668,25 @@ Accepts a single value or an array of values.
Converts an existing field's value to a different type, such as converting a string to an integer.
If the field value is an array, all members will be converted.

The supported types include: `integer`, `float`, `string`, and `boolean`.
The supported types include: `integer`, `float`, `string`, `boolean`, and `auto`.

Specifying `boolean` will set the field to true if its string value is equal to `true` (ignoring case), to
false if its string value is equal to `false` (ignoring case), or it will throw an exception otherwise.

Specifying `auto` will attempt to convert the string-valued `field` into the closest non-string type.
For example, a field whose value is `"true"` will be converted to its respective boolean type `true`, and
a value of `"242.15"` will automatically be converted to `242.15` of type `float`. If a provided field cannot
be appropriately converted, the Convert Processor will still process successfully and leave the field value as-is. In
such a case, `target_field` will still be updated with the unconverted field value.

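As an illustration only, a minimal pipeline sketch using the `auto` type might look like the following (the pipeline id and the `price` field are assumed for the example, not prescribed by the processor):

[source,js]
--------------------------------------------------
PUT _ingest/pipeline/my-convert-pipeline
{
  "description" : "sketch: convert a string field to its closest non-string type",
  "processors" : [
    {
      "convert" : {
        "field" : "price",
        "type" : "auto"
      }
    }
  ]
}
--------------------------------------------------
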
[[convert-options]]
.Convert Options
[options="header"]
|======
| Name           | Required  | Default  | Description
| `field`        | yes       | -        | The field whose value is to be converted
| `type`         | yes       | -        | The type to convert the existing value to
| Name           | Required  | Default  | Description
| `field`        | yes       | -        | The field whose value is to be converted
| `target_field` | no        | `field`  | The field to assign the converted value to, by default `field` is updated in-place
| `type`         | yes       | -        | The type to convert the existing value to
|======

[source,js]

@ -30,6 +30,12 @@ detected. All other datatypes must be mapped explicitly.
Besides the options listed below, dynamic field mapping rules can be further
customised with <<dynamic-templates,`dynamic_templates`>>.

[[total-fields-limit]]
==== Total fields limit

To avoid a mapping explosion, an index has a default limit of 1000 total fields.
The default limit can be changed with the `index.mapping.total_fields.limit` setting.

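As a sketch, the limit could be raised through the update index settings API on a hypothetical index named `my_index` (the index name and the value `2000` are illustrative assumptions):

[source,js]
--------------------------------------------------
PUT /my_index/_settings
{
  "index.mapping.total_fields.limit": 2000
}
--------------------------------------------------
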
[[date-detection]]
==== Date detection

@ -27,6 +27,8 @@ import org.elasticsearch.ingest.core.IngestDocument;
import java.util.HashMap;
import java.util.Map;

import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException;

public final class GrokProcessor extends AbstractProcessor {

    public static final String TYPE = "grok";

@ -82,7 +84,12 @@ public final class GrokProcessor extends AbstractProcessor {
            patternBank.putAll(customPatternBank);
        }

        Grok grok = new Grok(patternBank, matchPattern);
        Grok grok;
        try {
            grok = new Grok(patternBank, matchPattern);
        } catch (Exception e) {
            throw newConfigurationException(TYPE, processorTag, "pattern", "Invalid regex pattern. " + e.getMessage());
        }
        return new GrokProcessor(processorTag, grok, matchField);
    }

@ -84,4 +84,33 @@ public class GrokProcessorFactoryTests extends ESTestCase {
        assertThat(processor.getGrok(), notNullValue());
        assertThat(processor.getGrok().match("foo!"), equalTo(true));
    }

    public void testCreateWithInvalidPattern() throws Exception {
        GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap());
        Map<String, Object> config = new HashMap<>();
        config.put("field", "_field");
        config.put("pattern", "[");
        try {
            factory.create(config);
            fail("should fail");
        } catch (ElasticsearchParseException e) {
            assertThat(e.getMessage(), equalTo("[pattern] Invalid regex pattern. premature end of char-class"));
        }
    }

    public void testCreateWithInvalidPatternDefinition() throws Exception {
        GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap());
        Map<String, Object> config = new HashMap<>();
        config.put("field", "_field");
        config.put("pattern", "%{MY_PATTERN:name}!");
        config.put("pattern_definitions", Collections.singletonMap("MY_PATTERN", "["));
        try {
            factory.create(config);
            fail("should fail");
        } catch (ElasticsearchParseException e) {
            assertThat(e.getMessage(), equalTo("[pattern] Invalid regex pattern. premature end of char-class"));
        }
    }
}

@ -24,20 +24,26 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.missing.Missing;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.missing;

@ -538,6 +544,45 @@ public class ExtendedStatsTests extends AbstractNumericTestCase {
        }
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>extendedStats.avg", true)))
                        .subAggregation(
                                filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            ExtendedStats extendedStats = filter.getAggregations().get("extendedStats");
            assertThat(extendedStats, notNullValue());
            assertThat(extendedStats.getMin(), equalTo(Double.POSITIVE_INFINITY));
            assertThat(extendedStats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
            assertThat(extendedStats.getAvg(), equalTo(Double.NaN));
            assertThat(extendedStats.getSum(), equalTo(0.0));
            assertThat(extendedStats.getCount(), equalTo(0L));
            assertThat(extendedStats.getStdDeviation(), equalTo(Double.NaN));
            assertThat(extendedStats.getSumOfSquares(), equalTo(0.0));
            assertThat(extendedStats.getVariance(), equalTo(Double.NaN));
            assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER), equalTo(Double.NaN));
            assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER), equalTo(Double.NaN));
        }
    }

    private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
        ShardSearchFailure[] failures = response.getShardFailures();

@ -24,9 +24,11 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks;

@ -41,9 +43,12 @@ import java.util.Map;

import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

@ -463,4 +468,35 @@ public class HDRPercentileRanksTests extends AbstractNumericTestCase {
        }
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true)))
                        .subAggregation(filter("filter", termQuery("value", 100))
                                .subAggregation(percentileRanks("ranks").method(PercentilesMethod.HDR).values(99).field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            PercentileRanks ranks = filter.getAggregations().get("ranks");
            assertThat(ranks, notNullValue());
            assertThat(ranks.percent(99), equalTo(Double.NaN));
        }
    }

}

@ -25,9 +25,11 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;

@ -41,9 +43,12 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;

@ -443,4 +448,37 @@ public class HDRPercentilesTests extends AbstractNumericTestCase {
        }
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(
                        terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true)))
                                .subAggregation(filter("filter", termQuery("value", 100))
                                        .subAggregation(percentiles("percentiles").method(PercentilesMethod.HDR).field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            Percentiles percentiles = filter.getAggregations().get("percentiles");
            assertThat(percentiles, notNullValue());
            assertThat(percentiles.percentile(99), equalTo(Double.NaN));
        }
    }

}

@ -23,20 +23,26 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.max.Max;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

@ -293,4 +299,34 @@ public class MaxTests extends AbstractNumericTestCase {
        assertThat(max.getName(), equalTo("max"));
        assertThat(max.getValue(), equalTo(11.0));
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>max", true)))
                        .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(max("max").field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            Max max = filter.getAggregations().get("max");
            assertThat(max, notNullValue());
            assertThat(max.value(), equalTo(Double.NEGATIVE_INFINITY));
        }
    }
}

@ -23,20 +23,27 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.min.Min;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

@ -305,4 +312,34 @@ public class MinTests extends AbstractNumericTestCase {
        assertThat(min.getName(), equalTo("min"));
        assertThat(min.getValue(), equalTo(1.0));
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>min", true)))
                        .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(min("min").field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            Min min = filter.getAggregations().get("min");
            assertThat(min, notNullValue());
            assertThat(min.value(), equalTo(Double.POSITIVE_INFINITY));
        }
    }
}

@ -24,20 +24,27 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.stats.Stats;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

@ -399,6 +406,39 @@ public class StatsTests extends AbstractNumericTestCase {
        assertThat(stats.getCount(), equalTo(20L));
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>stats.avg", true)))
                        .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            Stats stats = filter.getAggregations().get("stats");
            assertThat(stats, notNullValue());
            assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
            assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
            assertThat(stats.getAvg(), equalTo(Double.NaN));
            assertThat(stats.getSum(), equalTo(0.0));
            assertThat(stats.getCount(), equalTo(0L));
        }
    }

    private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
        ShardSearchFailure[] failures = response.getShardFailures();

@ -25,13 +25,16 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregatorBuilder;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod;

import java.util.Arrays;
import java.util.Collection;

@ -41,9 +44,12 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

@ -425,4 +431,35 @@ public class TDigestPercentileRanksTests extends AbstractNumericTestCase {
        }
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true)))
                        .subAggregation(filter("filter", termQuery("value", 100))
                                .subAggregation(percentileRanks("ranks").method(PercentilesMethod.TDIGEST).values(99).field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            PercentileRanks ranks = filter.getAggregations().get("ranks");
            assertThat(ranks, notNullValue());
            assertThat(ranks.percent(99), equalTo(Double.NaN));
        }
    }

}

@ -25,13 +25,17 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregatorBuilder;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

@ -40,9 +44,12 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

@ -407,4 +414,36 @@ public class TDigestPercentilesTests extends AbstractNumericTestCase {
            previous = p99;
        }
    }

    @Override
    public void testOrderByEmptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(
                        terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true)))
                                .subAggregation(filter("filter", termQuery("value", 100))
                                        .subAggregation(percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value"))))
                .get();

        assertHitCount(searchResponse, 10);

        Terms terms = searchResponse.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        List<Terms.Bucket> buckets = terms.getBuckets();
        assertThat(buckets, notNullValue());
        assertThat(buckets.size(), equalTo(10));

        for (int i = 0; i < 10; i++) {
            Terms.Bucket bucket = buckets.get(i);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1));
            assertThat(bucket.getDocCount(), equalTo(1L));
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertThat(filter.getDocCount(), equalTo(0L));
            Percentiles percentiles = filter.getAggregations().get("percentiles");
            assertThat(percentiles, notNullValue());
            assertThat(percentiles.percentile(99), equalTo(Double.NaN));
        }
    }
}

@ -35,7 +35,7 @@
        type: test
        id: 1
        body: { "company": "cat" }
        routing:
        routing: null
  - do:
      indices.refresh: {}

@ -20,7 +20,8 @@
  - match: {noops: 0}
  - match: {throttled_millis: 0}
  - gte: { took: 0 }
  - is_false: created # Update by query can't create
  # Update by query can't create
  - is_false: created
  - is_false: task

---

@ -70,7 +71,8 @@
        body: { "text": "test" }
  - do:
      indices.refresh: {}
  - do: # Creates a new version for reindex to miss on scan.
  # Creates a new version for reindex to miss on scan.
  - do:
      index:
        index: test
        type: foo

@ -111,7 +113,8 @@
        body: { "text": "test" }
  - do:
      indices.refresh: {}
  - do: # Creates a new version for reindex to miss on scan.
  # Creates a new version for reindex to miss on scan.
  - do:
      index:
        index: test
        type: foo

@ -12,20 +12,6 @@
      index: test
      conflicts: cat

---
"invalid scroll_size fails":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /Failed to parse int parameter \[scroll_size\] with value \[cat\]/
      update_by_query:
        index: test
        scroll_size: cat

---
"invalid size fails":
  - do:

@ -97,4 +97,6 @@ public abstract class AbstractNumericTestCase extends ESIntegTestCase {
    public abstract void testScriptMultiValued() throws Exception;

    public abstract void testScriptMultiValuedWithParams() throws Exception;

    public abstract void testOrderByEmptyAggregation() throws Exception;
}