Merge branch 'master' into docs/add_autosense_to_query_dsl

Isabel Drost-Fromm 2016-05-17 21:12:06 +02:00
commit 4c627a00e5
136 changed files with 4105 additions and 3093 deletions


@ -517,7 +517,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]ConvertProcessor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]GcNames.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]HotThreads.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />


@ -249,7 +249,7 @@ public class TransportClusterAllocationExplainAction
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
final ActionListener<ClusterAllocationExplainResponse> listener) {
final RoutingNodes routingNodes = state.getRoutingNodes();
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
clusterInfoService.getClusterInfo(), System.nanoTime());
ShardRouting foundShard = null;


@ -54,10 +54,6 @@ import java.util.function.Predicate;
*/
public class RoutingNodes implements Iterable<RoutingNode> {
private final MetaData metaData;
private final ClusterBlocks blocks;
private final RoutingTable routingTable;
private final Map<String, RoutingNode> nodesToShards = new HashMap<>();
@ -66,8 +62,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
private final Map<ShardId, List<ShardRouting>> assignedShards = new HashMap<>();
private final ImmutableOpenMap<String, ClusterState.Custom> customs;
private final boolean readOnly;
private int inactivePrimaryCount = 0;
@ -85,10 +79,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
public RoutingNodes(ClusterState clusterState, boolean readOnly) {
this.readOnly = readOnly;
this.metaData = clusterState.metaData();
this.blocks = clusterState.blocks();
this.routingTable = clusterState.routingTable();
this.customs = clusterState.customs();
Map<String, LinkedHashMap<ShardId, ShardRouting>> nodesToShards = new HashMap<>();
// fill in the nodeToShards with the "live" nodes
@ -232,28 +223,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return routingTable();
}
public MetaData metaData() {
return this.metaData;
}
public MetaData getMetaData() {
return metaData();
}
public ClusterBlocks blocks() {
return this.blocks;
}
public ClusterBlocks getBlocks() {
return this.blocks;
}
public ImmutableOpenMap<String, ClusterState.Custom> customs() {
return this.customs;
}
public <T extends ClusterState.Custom> T custom(String type) { return (T) customs.get(type); }
public UnassignedShards unassigned() {
return this.unassignedShards;
}


@ -90,7 +90,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo());
boolean changed = applyStartedShards(routingNodes, startedShards);
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
@ -216,7 +216,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo());
FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo());
boolean changed = false;
// as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list
List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);
@ -266,7 +266,7 @@ public class AllocationService extends AbstractComponent {
// we don't shuffle the unassigned shards here, to try and get as close as possible to
// a consistent result of the effect the commands have on the routing
// this allows systems to dry run the commands, see the resulting cluster state, and act on it
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
// don't short circuit deciders, we want a full explanation
allocation.debugDecision(true);
// we ignore disable allocation, because commands are explicit
@ -305,7 +305,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
allocation.debugDecision(debug);
if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());


@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@ -57,8 +57,8 @@ public class FailedRerouteAllocation extends RoutingAllocation {
private final List<FailedShard> failedShards;
public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
this.failedShards = failedShards;
}


@ -20,12 +20,14 @@
package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.shard.ShardId;
import java.util.HashMap;
@ -118,8 +120,12 @@ public class RoutingAllocation {
private final RoutingNodes routingNodes;
private final MetaData metaData;
private final DiscoveryNodes nodes;
private final ImmutableOpenMap<String, ClusterState.Custom> customs;
private final AllocationExplanation explanation = new AllocationExplanation();
private final ClusterInfo clusterInfo;
@ -139,13 +145,15 @@ public class RoutingAllocation {
* Creates a new {@link RoutingAllocation}
* @param deciders {@link AllocationDeciders} used to make decisions for routing allocations
* @param routingNodes Routing nodes in the current cluster
* @param nodes TODO: Documentation
* @param clusterState cluster state before rerouting
* @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
*/
public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, ClusterInfo clusterInfo, long currentNanoTime) {
public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime) {
this.deciders = deciders;
this.routingNodes = routingNodes;
this.nodes = nodes;
this.metaData = clusterState.metaData();
this.nodes = clusterState.nodes();
this.customs = clusterState.customs();
this.clusterInfo = clusterInfo;
this.currentNanoTime = currentNanoTime;
}
@ -184,7 +192,7 @@ public class RoutingAllocation {
* @return Metadata of routing nodes
*/
public MetaData metaData() {
return routingNodes.metaData();
return metaData;
}
/**
@ -199,6 +207,10 @@ public class RoutingAllocation {
return clusterInfo;
}
public <T extends ClusterState.Custom> T custom(String key) {
return (T)customs.get(key);
}
/**
* Get explanations of current routing
* @return explanation of routing

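For context, the shape of the new call sites: a RoutingAllocation is now built straight from a ClusterState, which supplies the nodes, metadata, and customs that previously had to be threaded through RoutingNodes, so callers read allocation.metaData() and allocation.custom(...) instead of going through routingNodes. A minimal sketch, assuming only the signatures shown in this diff (the helper class and its parameter names are illustrative):

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;

// Hypothetical helper mirroring the updated call sites in AllocationService and the tests.
class RoutingAllocationSketch {
    static RoutingAllocation newAllocation(AllocationDeciders deciders,
                                           ClusterState clusterState,
                                           ClusterInfo clusterInfo) {
        // A single ClusterState argument replaces the old DiscoveryNodes parameter;
        // the allocation pulls nodes(), metaData(), and customs() from it internally.
        RoutingNodes routingNodes = new RoutingNodes(clusterState, false);
        return new RoutingAllocation(deciders, routingNodes, clusterState,
                clusterInfo, System.nanoTime());
    }
}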

@ -20,7 +20,7 @@
package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@ -35,8 +35,8 @@ public class StartedRerouteAllocation extends RoutingAllocation {
private final List<? extends ShardRouting> startedShards;
public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
this.startedShards = startedShards;
}
@ -47,4 +47,4 @@ public class StartedRerouteAllocation extends RoutingAllocation {
public List<? extends ShardRouting> startedShards() {
return startedShards;
}
}
}


@ -225,7 +225,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
this.weight = weight;
this.threshold = threshold;
this.routingNodes = allocation.routingNodes();
metaData = routingNodes.metaData();
this.metaData = allocation.metaData();
avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size();
buildModelFromAssigned();
}


@ -137,7 +137,7 @@ public class EnableAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation");
}
Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings();
Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings();
final Rebalance enable;
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);


@ -105,7 +105,7 @@ public class FilterAllocationDecider extends AllocationDecider {
Decision decision = shouldClusterFilter(node, allocation);
if (decision != null) return decision;
decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation);
decision = shouldIndexFilter(allocation.metaData().getIndexSafe(shardRouting.index()), node, allocation);
if (decision != null) return decision;
return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");


@ -86,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
@ -125,7 +125,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution


@ -98,7 +98,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
if (!enableRelocation && shardRouting.primary()) {
// Only primary shards are snapshotted
SnapshotsInProgress snapshotsInProgress = allocation.routingNodes().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress snapshotsInProgress = allocation.custom(SnapshotsInProgress.TYPE);
if (snapshotsInProgress == null) {
// Snapshots are not running
return allocation.decision(Decision.YES, NAME, "no snapshots are currently running");


@ -84,7 +84,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
public boolean allocateUnassigned(RoutingAllocation allocation) {
boolean changed = false;
final RoutingNodes routingNodes = allocation.routingNodes();
final MetaData metaData = routingNodes.metaData();
final MetaData metaData = allocation.metaData();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {


@ -0,0 +1,137 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Processor that sorts an array of items.
* Throws an exception if the specified field is not an array.
*/
public final class SortProcessor extends AbstractProcessor {
public static final String TYPE = "sort";
public static final String FIELD = "field";
public static final String ORDER = "order";
public static final String DEFAULT_ORDER = "asc";
public enum SortOrder {
ASCENDING("asc"), DESCENDING("desc");
private final String direction;
SortOrder(String direction) {
this.direction = direction;
}
public String toString() {
return this.direction;
}
public static SortOrder fromString(String value) {
if (value == null) {
throw new IllegalArgumentException("Sort direction cannot be null");
}
if (value.equals(ASCENDING.toString())) {
return ASCENDING;
} else if (value.equals(DESCENDING.toString())) {
return DESCENDING;
}
throw new IllegalArgumentException("Sort direction [" + value + "] not recognized."
+ " Valid values are: [asc, desc]");
}
}
private final String field;
private final SortOrder order;
SortProcessor(String tag, String field, SortOrder order) {
super(tag);
this.field = field;
this.order = order;
}
String getField() {
return field;
}
SortOrder getOrder() {
return order;
}
@Override
@SuppressWarnings("unchecked")
public void execute(IngestDocument document) {
List<? extends Comparable> list = document.getFieldValue(field, List.class);
if (list == null) {
throw new IllegalArgumentException("field [" + field + "] is null, cannot sort.");
}
if (list.size() <= 1) {
return;
}
if (order.equals(SortOrder.ASCENDING)) {
Collections.sort(list);
} else {
Collections.sort(list, Collections.reverseOrder());
}
document.setFieldValue(field, list);
}
@Override
public String getType() {
return TYPE;
}
public final static class Factory extends AbstractProcessorFactory<SortProcessor> {
@Override
public SortProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, FIELD);
try {
SortOrder direction = SortOrder.fromString(
ConfigurationUtils.readStringProperty(
TYPE,
processorTag,
config,
ORDER,
DEFAULT_ORDER));
return new SortProcessor(processorTag, field, direction);
} catch (IllegalArgumentException e) {
throw ConfigurationUtils.newConfigurationException(TYPE, processorTag, ORDER, e.getMessage());
}
}
}
}
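A quick usage sketch for the new processor. The constructor is package-private, so this lives in the same package (as the tests do); the tag and field names are made up for illustration:

package org.elasticsearch.ingest.processor;

import java.util.Arrays;
import org.elasticsearch.ingest.core.IngestDocument;

class SortProcessorSketch {
    static void demo(IngestDocument document) {
        // Hypothetical "tags" field holding a list; any mutually Comparable elements work.
        document.setFieldValue("tags", Arrays.asList("c", "a", "b"));
        SortProcessor processor =
                new SortProcessor("sketch-tag", "tags", SortProcessor.SortOrder.DESCENDING);
        processor.execute(document);
        // "tags" is now [c, b, a]; SortOrder.ASCENDING would yield [a, b, c].
    }
}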


@ -20,9 +20,11 @@
package org.elasticsearch.monitor.jvm;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector;
@ -30,14 +32,13 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.monitor.jvm.JvmStats.jvmStats;
/**
*
*/
public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
private final ThreadPool threadPool;
@ -119,12 +120,123 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
return GC_COLLECTOR_PREFIX + key + "." + level;
}
private static final String LOG_MESSAGE =
"[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}";
@Override
protected void doStart() {
if (!enabled) {
return;
}
scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(), interval);
scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(gcThresholds) {
@Override
void onMonitorFailure(Throwable t) {
logger.debug("failed to monitor", t);
}
@Override
void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
logSlowGc(logger, threshold, seq, slowGcEvent, JvmGcMonitorService::buildPools);
}
}, interval);
}
static void logSlowGc(
final ESLogger logger,
final JvmMonitor.Threshold threshold,
final long seq,
final JvmMonitor.SlowGcEvent slowGcEvent,
BiFunction<JvmStats, JvmStats, String> pools) {
final String name = slowGcEvent.currentGc.getName();
final long elapsed = slowGcEvent.elapsed;
final long totalGcCollectionCount = slowGcEvent.currentGc.getCollectionCount();
final long currentGcCollectionCount = slowGcEvent.collectionCount;
final TimeValue totalGcCollectionTime = slowGcEvent.currentGc.getCollectionTime();
final TimeValue currentGcCollectionTime = slowGcEvent.collectionTime;
final JvmStats lastJvmStats = slowGcEvent.lastJvmStats;
final JvmStats currentJvmStats = slowGcEvent.currentJvmStats;
final ByteSizeValue maxHeapUsed = slowGcEvent.maxHeapUsed;
switch (threshold) {
case WARN:
if (logger.isWarnEnabled()) {
logger.warn(
LOG_MESSAGE,
name,
seq,
totalGcCollectionCount,
currentGcCollectionTime,
currentGcCollectionCount,
TimeValue.timeValueMillis(elapsed),
currentGcCollectionTime,
totalGcCollectionTime,
lastJvmStats.getMem().getHeapUsed(),
currentJvmStats.getMem().getHeapUsed(),
maxHeapUsed,
pools.apply(lastJvmStats, currentJvmStats));
}
break;
case INFO:
if (logger.isInfoEnabled()) {
logger.info(
LOG_MESSAGE,
name,
seq,
totalGcCollectionCount,
currentGcCollectionTime,
currentGcCollectionCount,
TimeValue.timeValueMillis(elapsed),
currentGcCollectionTime,
totalGcCollectionTime,
lastJvmStats.getMem().getHeapUsed(),
currentJvmStats.getMem().getHeapUsed(),
maxHeapUsed,
pools.apply(lastJvmStats, currentJvmStats));
}
break;
case DEBUG:
if (logger.isDebugEnabled()) {
logger.debug(
LOG_MESSAGE,
name,
seq,
totalGcCollectionCount,
currentGcCollectionTime,
currentGcCollectionCount,
TimeValue.timeValueMillis(elapsed),
currentGcCollectionTime,
totalGcCollectionTime,
lastJvmStats.getMem().getHeapUsed(),
currentJvmStats.getMem().getHeapUsed(),
maxHeapUsed,
pools.apply(lastJvmStats, currentJvmStats));
}
break;
}
}
static String buildPools(JvmStats last, JvmStats current) {
StringBuilder sb = new StringBuilder();
for (JvmStats.MemoryPool currentPool : current.getMem()) {
JvmStats.MemoryPool prevPool = null;
for (JvmStats.MemoryPool pool : last.getMem()) {
if (pool.getName().equals(currentPool.getName())) {
prevPool = pool;
break;
}
}
sb.append("{[")
.append(currentPool.getName())
.append("] [")
.append(prevPool == null ? "?" : prevPool.getUsed())
.append("]->[")
.append(currentPool.getUsed())
.append("]/[")
.append(currentPool.getMax())
.append("]}");
}
return sb.toString();
}
@Override
@ -139,12 +251,46 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
protected void doClose() {
}
private class JvmMonitor implements Runnable {
static abstract class JvmMonitor implements Runnable {
enum Threshold { DEBUG, INFO, WARN }
static class SlowGcEvent {
final GarbageCollector currentGc;
final long collectionCount;
final TimeValue collectionTime;
final long elapsed;
final JvmStats lastJvmStats;
final JvmStats currentJvmStats;
final ByteSizeValue maxHeapUsed;
public SlowGcEvent(
final GarbageCollector currentGc,
final long collectionCount,
final TimeValue collectionTime,
final long elapsed,
final JvmStats lastJvmStats,
final JvmStats currentJvmStats,
final ByteSizeValue maxHeapUsed) {
this.currentGc = currentGc;
this.collectionCount = collectionCount;
this.collectionTime = collectionTime;
this.elapsed = elapsed;
this.lastJvmStats = lastJvmStats;
this.currentJvmStats = currentJvmStats;
this.maxHeapUsed = maxHeapUsed;
}
}
private long lastTime = now();
private JvmStats lastJvmStats = jvmStats();
private long seq = 0;
private final Map<String, GcThreshold> gcThresholds;
public JvmMonitor() {
public JvmMonitor(Map<String, GcThreshold> gcThresholds) {
this.gcThresholds = Objects.requireNonNull(gcThresholds);
}
@Override
@ -152,24 +298,28 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
try {
monitorLongGc();
} catch (Throwable t) {
logger.debug("failed to monitor", t);
onMonitorFailure(t);
}
}
private synchronized void monitorLongGc() {
abstract void onMonitorFailure(Throwable t);
synchronized void monitorLongGc() {
seq++;
final long currentTime = now();
JvmStats currentJvmStats = jvmStats();
final long elapsed = TimeUnit.NANOSECONDS.toMillis(currentTime - lastTime);
for (int i = 0; i < currentJvmStats.getGc().getCollectors().length; i++) {
GarbageCollector gc = currentJvmStats.getGc().getCollectors()[i];
GarbageCollector prevGc = lastJvmStats.gc.collectors[i];
GarbageCollector prevGc = lastJvmStats.getGc().getCollectors()[i];
// no collection has happened
long collections = gc.collectionCount - prevGc.collectionCount;
long collections = gc.getCollectionCount() - prevGc.getCollectionCount();
if (collections == 0) {
continue;
}
long collectionTime = gc.collectionTime - prevGc.collectionTime;
long collectionTime = gc.getCollectionTime().millis() - prevGc.getCollectionTime().millis();
if (collectionTime == 0) {
continue;
}
@ -181,34 +331,39 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
long avgCollectionTime = collectionTime / collections;
Threshold threshold = null;
if (avgCollectionTime > gcThreshold.warnThreshold) {
logger.warn("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
gc.getName(), seq, gc.getCollectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.getTimestamp() - lastJvmStats.getTimestamp()), TimeValue.timeValueMillis(collectionTime), gc.getCollectionTime(), lastJvmStats.getMem().getHeapUsed(), currentJvmStats.getMem().getHeapUsed(), JvmInfo.jvmInfo().getMem().getHeapMax(), buildPools(lastJvmStats, currentJvmStats));
threshold = Threshold.WARN;
} else if (avgCollectionTime > gcThreshold.infoThreshold) {
logger.info("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
gc.getName(), seq, gc.getCollectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.getTimestamp() - lastJvmStats.getTimestamp()), TimeValue.timeValueMillis(collectionTime), gc.getCollectionTime(), lastJvmStats.getMem().getHeapUsed(), currentJvmStats.getMem().getHeapUsed(), JvmInfo.jvmInfo().getMem().getHeapMax(), buildPools(lastJvmStats, currentJvmStats));
} else if (avgCollectionTime > gcThreshold.debugThreshold && logger.isDebugEnabled()) {
logger.debug("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
gc.getName(), seq, gc.getCollectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.getTimestamp() - lastJvmStats.getTimestamp()), TimeValue.timeValueMillis(collectionTime), gc.getCollectionTime(), lastJvmStats.getMem().getHeapUsed(), currentJvmStats.getMem().getHeapUsed(), JvmInfo.jvmInfo().getMem().getHeapMax(), buildPools(lastJvmStats, currentJvmStats));
threshold = Threshold.INFO;
} else if (avgCollectionTime > gcThreshold.debugThreshold) {
threshold = Threshold.DEBUG;
}
if (threshold != null) {
onSlowGc(threshold, seq, new SlowGcEvent(
gc,
collections,
TimeValue.timeValueMillis(collectionTime),
elapsed,
lastJvmStats,
currentJvmStats,
JvmInfo.jvmInfo().getMem().getHeapMax()));
}
}
lastTime = currentTime;
lastJvmStats = currentJvmStats;
}
private String buildPools(JvmStats prev, JvmStats current) {
StringBuilder sb = new StringBuilder();
for (JvmStats.MemoryPool currentPool : current.getMem()) {
JvmStats.MemoryPool prevPool = null;
for (JvmStats.MemoryPool pool : prev.getMem()) {
if (pool.getName().equals(currentPool.getName())) {
prevPool = pool;
break;
}
}
sb.append("{[").append(currentPool.getName())
.append("] [").append(prevPool == null ? "?" : prevPool.getUsed()).append("]->[").append(currentPool.getUsed()).append("]/[").append(currentPool.getMax()).append("]}");
}
return sb.toString();
JvmStats jvmStats() {
return JvmStats.jvmStats();
}
long now() {
return System.nanoTime();
}
abstract void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent);
}
}
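The point of extracting JvmMonitor as an abstract static class is testability: the now() and jvmStats() hooks and the onSlowGc/onMonitorFailure callbacks can all be overridden, so slow-GC detection can be exercised without a real JVM pause. A sketch of such a subclass, assuming GcThreshold stays a nested class of JvmGcMonitorService (the subclass name is illustrative):

package org.elasticsearch.monitor.jvm;

import java.util.Map;

// Illustrative only: decouples detection from logging, as the anonymous
// subclass in doStart() does, but in a form a test could instantiate.
class RecordingJvmMonitor extends JvmGcMonitorService.JvmMonitor {

    RecordingJvmMonitor(Map<String, JvmGcMonitorService.GcThreshold> gcThresholds) {
        super(gcThresholds);
    }

    @Override
    void onMonitorFailure(Throwable t) {
        // a test could record t here instead of logging it
    }

    @Override
    void onSlowGc(Threshold threshold, long seq, SlowGcEvent slowGcEvent) {
        // a test could assert on the threshold, sequence number, and event fields
    }

    // Tests can additionally override jvmStats() and now() to feed synthetic GC stats.
}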


@ -37,6 +37,7 @@ import org.elasticsearch.ingest.processor.LowercaseProcessor;
import org.elasticsearch.ingest.processor.RemoveProcessor;
import org.elasticsearch.ingest.processor.RenameProcessor;
import org.elasticsearch.ingest.processor.SetProcessor;
import org.elasticsearch.ingest.processor.SortProcessor;
import org.elasticsearch.ingest.processor.SplitProcessor;
import org.elasticsearch.ingest.processor.TrimProcessor;
import org.elasticsearch.ingest.processor.UppercaseProcessor;
@ -78,6 +79,7 @@ public class NodeModule extends AbstractModule {
registerProcessor(FailProcessor.TYPE, (templateService, registry) -> new FailProcessor.Factory(templateService));
registerProcessor(ForEachProcessor.TYPE, (templateService, registry) -> new ForEachProcessor.Factory(registry));
registerProcessor(DateIndexNameProcessor.TYPE, (templateService, registry) -> new DateIndexNameProcessor.Factory());
registerProcessor(SortProcessor.TYPE, (templateService, registry) -> new SortProcessor.Factory());
}
@Override


@ -30,8 +30,8 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.profile.CollectorResult;
import org.elasticsearch.search.profile.InternalProfileCollector;
import org.elasticsearch.search.profile.query.CollectorResult;
import org.elasticsearch.search.profile.query.InternalProfileCollector;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
import java.io.IOException;
@ -125,7 +125,7 @@ public class AggregationPhase implements SearchPhase {
Collections.emptyList());
collector = profileCollector;
// start a new profile with this collector
context.getProfilers().addProfiler().setCollector(profileCollector);
context.getProfilers().addQueryProfiler().setCollector(profileCollector);
}
globalsCollector.preCollection();
context.searcher().search(query, collector);


@ -33,10 +33,10 @@ import org.apache.lucene.search.Weight;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.profile.QueryProfileBreakdown;
import org.elasticsearch.search.profile.ProfileWeight;
import org.elasticsearch.search.profile.QueryProfiler;
import org.elasticsearch.search.profile.QueryTimingType;
import org.elasticsearch.search.profile.query.ProfileWeight;
import org.elasticsearch.search.profile.query.QueryProfileBreakdown;
import org.elasticsearch.search.profile.query.QueryProfiler;
import org.elasticsearch.search.profile.query.QueryTimingType;
import java.io.IOException;


@ -43,7 +43,7 @@ import java.util.Map;
* Each InternalProfileResult has a List of InternalProfileResults, which will contain
* "children" queries if applicable
*/
final class ProfileResult implements Writeable, ToXContent {
public final class ProfileResult implements Writeable, ToXContent {
private static final ParseField TYPE = new ParseField("type");
private static final ParseField DESCRIPTION = new ParseField("description");


@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.profile.query.CollectorResult;
import java.io.IOException;
import java.util.ArrayList;


@ -20,6 +20,7 @@
package org.elasticsearch.search.profile;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.profile.query.QueryProfiler;
import java.util.ArrayList;
import java.util.Collections;
@ -29,31 +30,31 @@ import java.util.List;
public final class Profilers {
private final ContextIndexSearcher searcher;
private final List<QueryProfiler> profilers;
private final List<QueryProfiler> queryProfilers;
/** Sole constructor. This {@link Profilers} instance will initially wrap one {@link QueryProfiler}. */
public Profilers(ContextIndexSearcher searcher) {
this.searcher = searcher;
this.profilers = new ArrayList<>();
addProfiler();
this.queryProfilers = new ArrayList<>();
addQueryProfiler();
}
/** Switch to a new profile. */
public QueryProfiler addProfiler() {
public QueryProfiler addQueryProfiler() {
QueryProfiler profiler = new QueryProfiler();
searcher.setProfiler(profiler);
profilers.add(profiler);
queryProfilers.add(profiler);
return profiler;
}
/** Get the current profiler. */
public QueryProfiler getCurrent() {
return profilers.get(profilers.size() - 1);
public QueryProfiler getCurrentQueryProfiler() {
return queryProfilers.get(queryProfilers.size() - 1);
}
/** Return the list of all created {@link QueryProfiler}s so far. */
public List<QueryProfiler> getProfilers() {
return Collections.unmodifiableList(profilers);
public List<QueryProfiler> getQueryProfilers() {
return Collections.unmodifiableList(queryProfilers);
}
}
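After the rename, call sites name query profilers explicitly, leaving room for other profiler kinds later. A minimal sketch of the updated pattern (the wrapper class is illustrative):

package org.elasticsearch.search.profile;

import java.util.List;
import org.elasticsearch.search.profile.query.QueryProfiler;

class ProfilersSketch {
    static List<QueryProfiler> profileTwoPhases(Profilers profilers) {
        // The constructor already registered one profiler; fetch the active one.
        QueryProfiler first = profilers.getCurrentQueryProfiler();
        // Switch to a fresh profiler for a second query phase.
        QueryProfiler second = profilers.addQueryProfiler();
        return profilers.getQueryProfilers(); // [first, second]
    }
}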


@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.profile.query.QueryProfiler;
import java.io.IOException;
import java.util.ArrayList;


@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;


@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;


@ -17,9 +17,10 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.search.Query;
import org.elasticsearch.search.profile.ProfileResult;
import java.util.ArrayList;
import java.util.Collections;


@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;


@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;


@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;


@ -17,7 +17,9 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.elasticsearch.search.profile.AbstractProfileBreakdown;
/**
* A record of timings for the various operations that may happen during query execution.


@ -17,9 +17,10 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.search.Query;
import org.elasticsearch.search.profile.ProfileResult;
import java.util.List;
import java.util.Objects;


@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import java.util.Locale;


@ -52,10 +52,10 @@ import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.profile.CollectorResult;
import org.elasticsearch.search.profile.InternalProfileCollector;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.query.CollectorResult;
import org.elasticsearch.search.profile.query.InternalProfileCollector;
import org.elasticsearch.search.rescore.RescorePhase;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.sort.SortAndFormats;
@ -113,7 +113,7 @@ public class QueryPhase implements SearchPhase {
if (searchContext.getProfilers() != null) {
List<ProfileShardResult> shardResults = SearchProfileShardResults
.buildShardResults(searchContext.getProfilers().getProfilers());
.buildShardResults(searchContext.getProfilers().getQueryProfilers());
searchContext.queryResult().profileResults(shardResults);
}
}
@ -365,7 +365,7 @@ public class QueryPhase implements SearchPhase {
try {
if (collector != null) {
if (doProfile) {
searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector);
searchContext.getProfilers().getCurrentQueryProfiler().setCollector((InternalProfileCollector) collector);
}
searcher.search(query, collector);
}
@ -386,7 +386,7 @@ public class QueryPhase implements SearchPhase {
if (searchContext.getProfilers() != null) {
List<ProfileShardResult> shardResults = SearchProfileShardResults
.buildShardResults(searchContext.getProfilers().getProfilers());
.buildShardResults(searchContext.getProfilers().getQueryProfilers());
searchContext.queryResult().profileResults(shardResults);
}

View File

@ -96,7 +96,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {
for (Client client : clients()) {
ClusterState clusterState = getLocalClusterState(client);
assertThat(clusterState.getRoutingNodes().metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
assertThat(clusterState.metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {


@ -875,7 +875,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
)
);
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo,
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo,
System.nanoTime());
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.NO));
@ -896,7 +896,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
)
);
clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo, System.nanoTime());
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));
@ -991,7 +991,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
)
);
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo,
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo,
System.nanoTime());
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
@ -1051,7 +1051,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
);
clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build();
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo, System.nanoTime());
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));


@ -136,7 +136,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
shardSizes.put("[test][0][p]", 10L); // 10 bytes
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState.nodes(), clusterInfo, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
assertEquals(mostAvailableUsage.toString(), Decision.YES, decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(mostAvailableUsage.toString(), Decision.NO, decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation));
}
@ -204,7 +204,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
shardSizes.put("[test][2][p]", 10L);
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState.nodes(), clusterInfo, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
assertEquals(Decision.YES, decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(Decision.NO, decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation));
try {


@ -346,7 +346,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime());
}
/**
@ -425,7 +425,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime());
}
/**
@ -444,7 +444,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -452,7 +452,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node1, 1, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -460,7 +460,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node2, 1, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
@ -485,7 +485,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -493,7 +493,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node1, 1, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -501,7 +501,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node2, 2, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
@ -525,7 +525,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(routingTableBuilder.build())
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime());
}
class TestAllocator extends PrimaryShardAllocator {


@ -302,7 +302,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), ClusterInfo.EMPTY, System.nanoTime());
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime());
}
private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {
@ -324,7 +324,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), ClusterInfo.EMPTY, System.nanoTime());
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime());
}
class TestAllocator extends ReplicaShardAllocator {


@ -103,7 +103,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
.nodes(DiscoveryNodes.EMPTY_NODES)
.build(), false
);
RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current.nodes(), ClusterInfo.EMPTY, System.nanoTime());
RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current, ClusterInfo.EMPTY, System.nanoTime());
allocator.allocateUnassigned(routingAllocation);
}


@ -0,0 +1,282 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.processor.SortProcessor.SortOrder;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class SortProcessorTests extends ESTestCase {
public void testSortStrings() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<String> fieldValue = new ArrayList<>(numItems);
List<String> expectedResult = new ArrayList<>(numItems);
for (int j = 0; j < numItems; j++) {
String value = randomAsciiOfLengthBetween(1, 10);
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortIntegersNonRandom() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
Integer[] expectedResult = new Integer[]{1,2,3,4,5,10,20,21,22,50,100};
List<Integer> fieldValue = new ArrayList<>(expectedResult.length);
fieldValue.addAll(Arrays.asList(expectedResult).subList(0, expectedResult.length));
Collections.shuffle(fieldValue, random());
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, SortOrder.ASCENDING);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, List.class).toArray(), equalTo(expectedResult));
}
public void testSortIntegers() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<Integer> fieldValue = new ArrayList<>(numItems);
List<Integer> expectedResult = new ArrayList<>(numItems);
for (int j = 0; j < numItems; j++) {
Integer value = randomIntBetween(1, 100);
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortShorts() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<Short> fieldValue = new ArrayList<>(numItems);
List<Short> expectedResult = new ArrayList<>(numItems);
for (int j = 0; j < numItems; j++) {
Short value = randomShort();
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortDoubles() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<Double> fieldValue = new ArrayList<>(numItems);
List<Double> expectedResult = new ArrayList<>(numItems);
for (int j = 0; j < numItems; j++) {
Double value = randomDoubleBetween(0.0, 100.0, true);
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortFloats() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<Float> fieldValue = new ArrayList<>(numItems);
List<Float> expectedResult = new ArrayList<>(numItems);
for (int j = 0; j < numItems; j++) {
Float value = randomFloat();
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortBytes() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<Byte> fieldValue = new ArrayList<>(numItems);
List<Byte> expectedResult = new ArrayList<>(numItems);
for (int j = 0; j < numItems; j++) {
Byte value = randomByte();
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortBooleans() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<Boolean> fieldValue = new ArrayList<>(numItems);
List<Boolean> expectedResult = new ArrayList<>(numItems);
for (int j = 0; j < numItems; j++) {
Boolean value = randomBoolean();
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortMixedStrings() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
int numItems = randomIntBetween(1, 10);
List<String> fieldValue = new ArrayList<>(numItems);
List<String> expectedResult = new ArrayList<>(numItems);
String value;
for (int j = 0; j < numItems; j++) {
if (randomBoolean()) {
value = String.valueOf(randomIntBetween(0, 100));
} else {
value = randomAsciiOfLengthBetween(1, 10);
}
fieldValue.add(value);
expectedResult.add(value);
}
Collections.sort(expectedResult);
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
if (order.equals(SortOrder.DESCENDING)) {
Collections.reverse(expectedResult);
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
processor.execute(ingestDocument);
assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
}
public void testSortNonListField() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
String fieldName = RandomDocumentPicks.randomFieldName(random());
ingestDocument.setFieldValue(fieldName, randomAsciiOfLengthBetween(1, 10));
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed on a non-list field");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("field [" + fieldName + "] of type [java.lang.String] cannot be cast to [java.util.List]"));
}
}
public void testSortNonExistingField() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
String fieldName = RandomDocumentPicks.randomFieldName(random());
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed on a non-existing field");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("not present as part of path [" + fieldName + "]"));
}
}
public void testSortNullValue() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null));
SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
Processor processor = new SortProcessor(randomAsciiOfLength(10), "field", order);
try {
processor.execute(ingestDocument);
fail("processor execute should have failed on a null value");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("field [field] is null, cannot sort."));
}
}
}

View File

@ -0,0 +1,136 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.monitor.jvm;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
public class JvmGcMonitorServiceTests extends ESTestCase {
public void testSlowGcLogging() {
final ESLogger logger = mock(ESLogger.class);
when(logger.isWarnEnabled()).thenReturn(true);
when(logger.isInfoEnabled()).thenReturn(true);
when(logger.isDebugEnabled()).thenReturn(true);
final JvmGcMonitorService.JvmMonitor.Threshold threshold = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
final String name = randomAsciiOfLength(16);
final long seq = randomIntBetween(1, 1 << 30);
final int elapsedValue = randomIntBetween(1, 1 << 10);
final long totalCollectionCount = randomIntBetween(1, 16);
final long currentCollectionCount = randomIntBetween(1, 16);
final TimeValue totalCollectionTime = TimeValue.timeValueMillis(randomIntBetween(1, elapsedValue));
final TimeValue currentCollectionTime = TimeValue.timeValueMillis(randomIntBetween(1, elapsedValue));
final ByteSizeValue lastHeapUsed = new ByteSizeValue(randomIntBetween(1, 1 << 10));
JvmStats lastJvmStats = mock(JvmStats.class);
JvmStats.Mem lastMem = mock(JvmStats.Mem.class);
when(lastMem.getHeapUsed()).thenReturn(lastHeapUsed);
when(lastJvmStats.getMem()).thenReturn(lastMem);
when(lastJvmStats.toString()).thenReturn("last");
final ByteSizeValue currentHeapUsed = new ByteSizeValue(randomIntBetween(1, 1 << 10));
JvmStats currentJvmStats = mock(JvmStats.class);
JvmStats.Mem currentMem = mock(JvmStats.Mem.class);
when(currentMem.getHeapUsed()).thenReturn(currentHeapUsed);
when(currentJvmStats.getMem()).thenReturn(currentMem);
when(currentJvmStats.toString()).thenReturn("current");
JvmStats.GarbageCollector gc = mock(JvmStats.GarbageCollector.class);
when(gc.getName()).thenReturn(name);
when(gc.getCollectionCount()).thenReturn(totalCollectionCount);
when(gc.getCollectionTime()).thenReturn(totalCollectionTime);
final ByteSizeValue maxHeapUsed = new ByteSizeValue(Math.max(lastHeapUsed.bytes(), currentHeapUsed.bytes()) + 1 << 10);
JvmGcMonitorService.JvmMonitor.SlowGcEvent slowGcEvent = new JvmGcMonitorService.JvmMonitor.SlowGcEvent(
gc,
currentCollectionCount,
currentCollectionTime,
elapsedValue,
lastJvmStats,
currentJvmStats,
maxHeapUsed);
JvmGcMonitorService.logSlowGc(logger, threshold, seq, slowGcEvent, (l, c) -> l.toString() + ", " + c.toString());
switch (threshold) {
case WARN:
verify(logger).isWarnEnabled();
verify(logger).warn(
"[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
name,
seq,
totalCollectionCount,
currentCollectionTime,
currentCollectionCount,
TimeValue.timeValueMillis(elapsedValue),
currentCollectionTime,
totalCollectionTime,
lastHeapUsed,
currentHeapUsed,
maxHeapUsed,
"last, current");
break;
case INFO:
verify(logger).isInfoEnabled();
verify(logger).info(
"[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
name,
seq,
totalCollectionCount,
currentCollectionTime,
currentCollectionCount,
TimeValue.timeValueMillis(elapsedValue),
currentCollectionTime,
totalCollectionTime,
lastHeapUsed,
currentHeapUsed,
maxHeapUsed,
"last, current");
break;
case DEBUG:
verify(logger).isDebugEnabled();
verify(logger).debug(
"[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
name,
seq,
totalCollectionCount,
currentCollectionTime,
currentCollectionCount,
TimeValue.timeValueMillis(elapsedValue),
currentCollectionTime,
totalCollectionTime,
lastHeapUsed,
currentHeapUsed,
maxHeapUsed,
"last, current");
break;
}
verifyNoMoreInteractions(logger);
}
}

View File

@ -0,0 +1,248 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.monitor.jvm;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.hasToString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class JvmMonitorTests extends ESTestCase {
public void testMonitorFailure() {
AtomicBoolean shouldFail = new AtomicBoolean();
AtomicBoolean invoked = new AtomicBoolean();
JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap()) {
@Override
void onMonitorFailure(Throwable t) {
invoked.set(true);
assertThat(t, instanceOf(RuntimeException.class));
assertThat(t, hasToString(containsString("simulated")));
}
@Override
synchronized void monitorLongGc() {
if (shouldFail.get()) {
throw new RuntimeException("simulated");
}
}
@Override
void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
}
};
monitor.run();
assertFalse(invoked.get());
shouldFail.set(true);
monitor.run();
assertTrue(invoked.get());
}
public void testSlowGc() {
final int initialYoungCollectionCount = randomIntBetween(1, 4);
final int initialYoungCollectionTime = randomIntBetween(initialYoungCollectionCount * 100, initialYoungCollectionCount * 200);
final int initialOldCollectionCount = randomIntBetween(1, 4);
final int initialOldCollectionTime = randomIntBetween(initialOldCollectionCount * 1000, initialOldCollectionCount * 2000);
final JvmStats.GarbageCollector initialYoungCollector = mock(JvmStats.GarbageCollector.class);
when(initialYoungCollector.getName()).thenReturn("young");
when(initialYoungCollector.getCollectionCount()).thenReturn((long) initialYoungCollectionCount);
when(initialYoungCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialYoungCollectionTime));
final JvmStats.GarbageCollector initialOldCollector = mock(JvmStats.GarbageCollector.class);
when(initialOldCollector.getName()).thenReturn("old");
when(initialOldCollector.getCollectionCount()).thenReturn((long) initialOldCollectionCount);
when(initialOldCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialOldCollectionTime));
JvmStats initialJvmStats = jvmStats(initialYoungCollector, initialOldCollector);
final Map<String, JvmGcMonitorService.GcThreshold> gcThresholds = new HashMap<>();
// fake debug threshold, info will be double this and warn will
// be triple
final int youngDebugThreshold = randomIntBetween(1, 10) * 100;
gcThresholds.put(
"young",
new JvmGcMonitorService.GcThreshold("young", youngDebugThreshold * 3, youngDebugThreshold * 2, youngDebugThreshold));
final boolean youngGcThreshold = randomBoolean();
final JvmGcMonitorService.JvmMonitor.Threshold youngThresholdLevel = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
final int youngMultiplier = 1 + youngThresholdLevel.ordinal();
final int youngCollections = randomIntBetween(1, 4);
final JvmStats.GarbageCollector youngCollector;
youngCollector = mock(JvmStats.GarbageCollector.class);
when(youngCollector.getName()).thenReturn("young");
when(youngCollector.getCollectionCount()).thenReturn((long) (initialYoungCollectionCount + youngCollections));
final int youngIncrement;
if (youngGcThreshold) {
// we are faking that youngCollections collections occurred
// this number is chosen so that we squeak over the
// random threshold when computing the average collection
// time: note that average collection time will just be
// youngMultiplier * youngDebugThreshold + 1 which ensures
// that we are over the right threshold but below the next
// threshold
youngIncrement = youngCollections * youngMultiplier * youngDebugThreshold + youngCollections;
} else {
// fake that we did not exceed the threshold
youngIncrement = randomIntBetween(1, youngDebugThreshold);
}
when(youngCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialYoungCollectionTime + youngIncrement));
// fake debug threshold, info will be double this and warn will
// be triple
final int oldDebugThreshold = randomIntBetween(1, 10) * 100;
gcThresholds.put(
"old",
new JvmGcMonitorService.GcThreshold("old", oldDebugThreshold * 3, oldDebugThreshold * 2, oldDebugThreshold));
final boolean oldGcThreshold = randomBoolean();
final JvmGcMonitorService.JvmMonitor.Threshold oldThresholdLevel = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
final int oldMultiplier = 1 + oldThresholdLevel.ordinal();
final int oldCollections = randomIntBetween(1, 4);
final JvmStats.GarbageCollector oldCollector = mock(JvmStats.GarbageCollector.class);
when(oldCollector.getName()).thenReturn("old");
when(oldCollector.getCollectionCount()).thenReturn((long) (initialOldCollectionCount + oldCollections));
final int oldIncrement;
if (oldGcThreshold) {
// we are faking that oldCollections collections occurred
// this number is chosen so that we squeak over the
// random threshold when computing the average collection
// time: note that average collection time will just be
// oldMultiplier * oldDebugThreshold + 1 which ensures
// that we are over the right threshold but below the next
// threshold
oldIncrement = oldCollections * oldMultiplier * oldDebugThreshold + oldCollections;
} else {
// fake that we did not exceed the threshold
oldIncrement = randomIntBetween(1, oldDebugThreshold);
}
when(oldCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialOldCollectionTime + oldIncrement));
final long start = randomIntBetween(1, 1 << 30);
final long expectedElapsed = randomIntBetween(1, 1000);
final AtomicLong now = new AtomicLong(start);
final AtomicReference<JvmStats> jvmStats = new AtomicReference<>();
jvmStats.set(initialJvmStats);
final AtomicInteger count = new AtomicInteger();
JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds) {
@Override
void onMonitorFailure(Throwable t) {
}
@Override
void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
count.incrementAndGet();
assertThat(seq, equalTo(1L));
assertThat(slowGcEvent.elapsed, equalTo(expectedElapsed));
assertThat(slowGcEvent.currentGc.getName(), anyOf(equalTo("young"), equalTo("old")));
if ("young".equals(slowGcEvent.currentGc.getName())) {
assertCollection(
threshold,
youngThresholdLevel,
slowGcEvent,
initialYoungCollectionCount,
youngCollections,
initialYoungCollectionTime,
youngIncrement);
} else if ("old".equals(slowGcEvent.currentGc.getName())) {
assertCollection(
threshold,
oldThresholdLevel,
slowGcEvent,
initialOldCollectionCount,
oldCollections,
initialOldCollectionTime,
oldIncrement);
}
}
@Override
long now() {
return now.get();
}
@Override
JvmStats jvmStats() {
return jvmStats.get();
}
};
final JvmStats monitorJvmStats = jvmStats(youngCollector, oldCollector);
now.set(start + TimeUnit.NANOSECONDS.convert(expectedElapsed, TimeUnit.MILLISECONDS));
jvmStats.set(monitorJvmStats);
monitor.monitorLongGc();
assertThat(count.get(), equalTo((youngGcThreshold ? 1 : 0) + (oldGcThreshold ? 1 : 0)));
}
private void assertCollection(
final JvmGcMonitorService.JvmMonitor.Threshold actualThreshold,
final JvmGcMonitorService.JvmMonitor.Threshold expectedThreshold,
final JvmGcMonitorService.JvmMonitor.SlowGcEvent slowGcEvent,
final int initialCollectionCount,
final int collections,
final int initialCollectionTime,
final int increment) {
assertThat(actualThreshold, equalTo(expectedThreshold));
assertThat(slowGcEvent.currentGc.getCollectionCount(), equalTo((long) (initialCollectionCount + collections)));
assertThat(slowGcEvent.collectionCount, equalTo((long) collections));
assertThat(slowGcEvent.collectionTime, equalTo(TimeValue.timeValueMillis(increment)));
assertThat(slowGcEvent.currentGc.getCollectionTime(), equalTo(TimeValue.timeValueMillis(initialCollectionTime + increment)));
}
private JvmStats jvmStats(JvmStats.GarbageCollector youngCollector, JvmStats.GarbageCollector oldCollector) {
final JvmStats jvmStats = mock(JvmStats.class);
final JvmStats.GarbageCollectors initialGcs = mock(JvmStats.GarbageCollectors.class);
final JvmStats.GarbageCollector[] initialCollectors = new JvmStats.GarbageCollector[2];
initialCollectors[0] = youngCollector;
initialCollectors[1] = oldCollector;
when(initialGcs.getCollectors()).thenReturn(initialCollectors);
when(jvmStats.getGc()).thenReturn(initialGcs);
when(jvmStats.getMem()).thenReturn(JvmStats.jvmStats().getMem());
return jvmStats;
}
}

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
@ -37,6 +37,9 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.profile.ProfileResult;
import org.elasticsearch.search.profile.query.QueryProfiler;
import org.elasticsearch.search.profile.query.QueryTimingType;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.util.English;
import org.elasticsearch.action.index.IndexRequestBuilder;
@ -29,6 +29,9 @@ import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.profile.ProfileResult;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.query.CollectorResult;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
@ -36,7 +39,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.search.profile.RandomQueryGenerator.randomQueryBuilder;
import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder;
import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@ -160,7 +163,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));
}
assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" + vanillaResponse.getHits().totalHits() + "]",
assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits ["
+ vanillaResponse.getHits().totalHits() + "]",
vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
@ -237,7 +241,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
indexRandom(true, docs);
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")).must(QueryBuilders.matchQuery("field1", "two"));
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))
.must(QueryBuilders.matchQuery("field1", "two"));
SearchResponse resp = client().prepareSearch()
.setQuery(q)
@ -355,7 +360,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
refresh();
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
QueryBuilder q = QueryBuilders.boolQuery()
.must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
logger.info("Query: {}", q);

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;
import org.apache.lucene.util.English;
import org.elasticsearch.common.unit.Fuzziness;

View File

@ -33,7 +33,10 @@ That will return something like this:
"batches": 1,
"version_conflicts": 0,
"noops": 0,
"retries": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0,
"requests_per_second": "unlimited",
"throttled_until_millis": 0,
@ -386,7 +389,10 @@ The JSON response looks like this:
"created": 123,
"batches": 1,
"version_conflicts": 2,
"retries": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0,
"failures" : [ ]
}
@ -414,7 +420,8 @@ The number of version conflicts that reindex hit.
`retries`::
The number of retries that the reindex did in response to a full queue.
The number of retries attempted by reindex. `bulk` is the number of bulk
actions retried and `search` is the number of search actions retried.
`throttled_millis`::
@ -468,7 +475,10 @@ The response looks like:
"batches" : 4,
"version_conflicts" : 0,
"noops" : 0,
"retries": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0
},
"description" : ""

View File

@ -26,7 +26,10 @@ That will return something like this:
"batches": 1,
"version_conflicts": 0,
"noops": 0,
"retries": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0,
"requests_per_second": "unlimited",
"throttled_until_millis": 0,
@ -220,7 +223,10 @@ The JSON response looks like this:
"updated": 0,
"batches": 1,
"version_conflicts": 2,
"retries": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0,
"failures" : [ ]
}
@ -244,7 +250,8 @@ The number of version conflicts that the update by query hit.
`retries`::
The number of retries that the update by query did in response to a full queue.
The number of retries attempted by update-by-query. `bulk` is the number of bulk
actions retried and `search` is the number of search actions retried.
`throttled_millis`::
@ -299,7 +306,10 @@ The response looks like:
"batches" : 4,
"version_conflicts" : 0,
"noops" : 0,
"retries": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0
},
"description" : ""

View File

@ -1282,6 +1282,31 @@ Splits a field into an array using a separator character. Only works on string f
--------------------------------------------------
<1> Treat all consecutive whitespace characters as a single separator
[[sort-processor]]
=== Sort Processor
Sorts the elements of an array in ascending or descending order. Homogeneous arrays of numbers
are sorted numerically, while arrays of strings, or heterogeneous arrays of strings and numbers,
are sorted lexicographically. The processor throws an error when the field is not an array.
[[sort-options]]
.Sort Options
[options="header"]
|======
| Name | Required | Default | Description
| `field` | yes | - | The field to be sorted
| `order` | no | `"asc"` | The sort order to use. Accepts `"asc"` or `"desc"`.
|======
[source,js]
--------------------------------------------------
{
"sort": {
"field": "field_to_sort",
"order": "desc"
}
}
--------------------------------------------------
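For instance, given a hypothetical document containing

[source,js]
--------------------------------------------------
{ "field_to_sort": [3, 1, 2] }
--------------------------------------------------

the configuration above would rewrite the field to `[3, 2, 1]`.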
[[trim-processor]]
=== Trim Processor
Trims whitespace from a field.

View File

@ -34,6 +34,7 @@ way to do this is to upgrade to Elasticsearch 2.3 or later and to use the
* <<breaking_50_percolator>>
* <<breaking_50_suggester>>
* <<breaking_50_index_apis>>
* <<breaking_50_document_api_changes>>
* <<breaking_50_settings_changes>>
* <<breaking_50_allocation>>
* <<breaking_50_http_changes>>

View File

@ -0,0 +1,33 @@
[[breaking_50_document_api_changes]]
=== Document API changes
==== Reindex and Update By Query
Before 5.0.0 `_reindex` and `_update_by_query` only retried bulk failures so
they used the following response format:
[source,js]
----------------------
{
...
"retries": 10
...
}
----------------------
Where `retries` counts the number of bulk retries. Now they retry on search
failures as well and use this response format:
[source,js]
----------------------
{
...
"retries": {
"bulk": 10,
"search": 1
}
...
}
----------------------
Where `bulk` counts the number of bulk retries and `search` counts the number
of search retries.

View File

@ -176,3 +176,9 @@ The `coerce` and `ignore_malformed` parameters were deprecated in favour of `val
* Top level inner hits syntax has been removed. Inner hits can now only be specified as part of the `nested`,
`has_child` and `has_parent` queries. Use cases previously only possible with top level inner hits can now be done
with inner hits defined inside the query dsl.
==== Query Profiler
In the response for profiling queries, the `query_type` field has been renamed to `type` and `lucene` has been renamed
to `description`. These changes were made so that the response format can more easily support other types of profiling
in the future.
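As an illustration, a profile entry that previously looked like the first fragment below now uses the second; the field values here are hypothetical:

[source,js]
--------------------------------------------------
{
    "query_type": "BooleanQuery",
    "lucene": "body:foo body:bar",
    ...
}
--------------------------------------------------

becomes

[source,js]
--------------------------------------------------
{
    "type": "BooleanQuery",
    "description": "body:foo body:bar",
    ...
}
--------------------------------------------------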

View File

@ -52,7 +52,7 @@ with a timeout defaulting to 20 times the ping timeout.
When the master node stops or has encountered a problem, the cluster nodes
start pinging again and will elect a new master. This pinging round also
serves as a protection against (partial) network failures where node may unjustly
serves as a protection against (partial) network failures where a node may unjustly
think that the master has failed. In this case the node will simply hear from
other nodes about the currently active master.
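A minimal sketch of the related settings in `elasticsearch.yml`, assuming the standard zen discovery setting names (the values shown are illustrative, not recommendations):

[source,yaml]
--------------------------------------------------
discovery.zen.ping_timeout: 3s   # base ping timeout (default 3s)
discovery.zen.join_timeout: 60s  # join/election timeout, defaults to 20x the ping timeout
--------------------------------------------------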

View File

@ -189,7 +189,7 @@ POST hockey/player/1/_update
{
"script": {
"lang": "painless",
"inline": "ctx._source.last = params.last ctx._source.nick = params.nick",
"inline": "ctx._source.last = params.last; ctx._source.nick = params.nick",
"params": {
"last": "gaudreau",
"nick": "hockey"

View File

@ -81,7 +81,7 @@ all documents where the `status` field contains the term `active`.
This first query assigns a score of `0` to all documents, as no scoring
query has been specified:
[source,js]
[source,json]
---------------------------------
GET _search
{
@ -101,7 +101,7 @@ GET _search
This `bool` query has a `match_all` query, which assigns a score of `1.0` to
all documents.
[source,js]
[source,json]
---------------------------------
GET _search
{
@ -125,7 +125,7 @@ This `constant_score` query behaves in exactly the same way as the second exampl
The `constant_score` query assigns a score of `1.0` to all documents matched
by the filter.
[source,js]
[source,json]
---------------------------------
GET _search
{

View File

@ -8,23 +8,19 @@ overall score.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"boosting" : {
"positive" : {
"term" : {
"field1" : "value1"
}
},
"negative" : {
"term" : {
"field2" : "value2"
}
},
"negative_boost" : 0.2
}
"boosting" : {
"positive" : {
"term" : {
"field1" : "value1"
}
},
"negative" : {
"term" : {
"field2" : "value2"
}
},
"negative_boost" : 0.2
}
}
--------------------------------------------------
// CONSOLE

View File

@ -70,19 +70,15 @@ In this example, words that have a document frequency greater than 0.1%
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"common": {
"body": {
"query": "this is bonsai cool",
"cutoff_frequency": 0.001
}
}
"common": {
"body": {
"query": "this is bonsai cool",
"cutoff_frequency": 0.001
}
}
}
--------------------------------------------------
// CONSOLE
The number of terms which should match can be controlled with the
<<query-dsl-minimum-should-match,`minimum_should_match`>>
@ -94,44 +90,36 @@ all terms required:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"common": {
"body": {
"query": "nelly the elephant as a cartoon",
"cutoff_frequency": 0.001,
"low_freq_operator": "and"
}
}
"common": {
"body": {
"query": "nelly the elephant as a cartoon",
"cutoff_frequency": 0.001,
"low_freq_operator": "and"
}
}
}
--------------------------------------------------
// CONSOLE
which is roughly equivalent to:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"must": [
{ "term": { "body": "nelly"}},
{ "term": { "body": "elephant"}},
{ "term": { "body": "cartoon"}}
],
"should": [
{ "term": { "body": "the"}},
{ "term": { "body": "as"}},
{ "term": { "body": "a"}}
]
}
}
"bool": {
"must": [
{ "term": { "body": "nelly"}},
{ "term": { "body": "elephant"}},
{ "term": { "body": "cartoon"}}
],
"should": [
{ "term": { "body": "the"}}
{ "term": { "body": "as"}}
{ "term": { "body": "a"}}
]
}
}
--------------------------------------------------
// CONSOLE
Alternatively use
<<query-dsl-minimum-should-match,`minimum_should_match`>>
@ -140,49 +128,41 @@ must be present, for instance:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"common": {
"body": {
"query": "nelly the elephant as a cartoon",
"cutoff_frequency": 0.001,
"minimum_should_match": 2
}
}
"common": {
"body": {
"query": "nelly the elephant as a cartoon",
"cutoff_frequency": 0.001,
"minimum_should_match": 2
}
}
}
--------------------------------------------------
// CONSOLE
which is roughly equivalent to:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"must": {
"bool": {
"should": [
{ "term": { "body": "nelly"}},
{ "term": { "body": "elephant"}},
{ "term": { "body": "cartoon"}}
],
"minimum_should_match": 2
}
},
"should": [
{ "term": { "body": "the"}},
{ "term": { "body": "as"}},
{ "term": { "body": "a"}}
]
}
}
"bool": {
"must": {
"bool": {
"should": [
{ "term": { "body": "nelly"}},
{ "term": { "body": "elephant"}},
{ "term": { "body": "cartoon"}}
],
"minimum_should_match": 2
}
},
"should": [
{ "term": { "body": "the"}}
{ "term": { "body": "as"}}
{ "term": { "body": "a"}}
]
}
}
--------------------------------------------------
// CONSOLE
minimum_should_match
@ -194,58 +174,50 @@ additional parameters (note the change in structure):
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"common": {
"body": {
"query": "nelly the elephant not as a cartoon",
"cutoff_frequency": 0.001,
"minimum_should_match": {
"low_freq" : 2,
"high_freq" : 3
}
}
}
"common": {
"body": {
"query": "nelly the elephant not as a cartoon",
"cutoff_frequency": 0.001,
"minimum_should_match": {
"low_freq" : 2,
"high_freq" : 3
}
}
}
}
--------------------------------------------------
// CONSOLE
which is roughly equivalent to:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"must": {
"bool": {
"should": [
{ "term": { "body": "nelly"}},
{ "term": { "body": "elephant"}},
{ "term": { "body": "cartoon"}}
],
"minimum_should_match": 2
}
},
"should": {
"bool": {
"should": [
{ "term": { "body": "the"}},
{ "term": { "body": "not"}},
{ "term": { "body": "as"}},
{ "term": { "body": "a"}}
],
"minimum_should_match": 3
}
}
}
"bool": {
"must": {
"bool": {
"should": [
{ "term": { "body": "nelly"}},
{ "term": { "body": "elephant"}},
{ "term": { "body": "cartoon"}}
],
"minimum_should_match": 2
}
},
"should": {
"bool": {
"should": [
{ "term": { "body": "the"}},
{ "term": { "body": "not"}},
{ "term": { "body": "as"}},
{ "term": { "body": "a"}}
],
"minimum_should_match": 3
}
}
}
}
--------------------------------------------------
// CONSOLE
In this case it means the high frequency terms have only an impact on
relevance when there are at least three of them. But the most
@ -255,44 +227,36 @@ for high frequency terms is when there are only high frequency terms:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"common": {
"body": {
"query": "how not to be",
"cutoff_frequency": 0.001,
"minimum_should_match": {
"low_freq" : 2,
"high_freq" : 3
}
}
}
"common": {
"body": {
"query": "how not to be",
"cutoff_frequency": 0.001,
"minimum_should_match": {
"low_freq" : 2,
"high_freq" : 3
}
}
}
}
--------------------------------------------------
// CONSOLE
which is roughly equivalent to:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"should": [
{ "term": { "body": "how"}},
{ "term": { "body": "not"}},
{ "term": { "body": "to"}},
{ "term": { "body": "be"}}
],
"minimum_should_match": "3<50%"
}
}
"bool": {
"should": [
{ "term": { "body": "how"}},
{ "term": { "body": "not"}},
{ "term": { "body": "to"}},
{ "term": { "body": "be"}}
],
"minimum_should_match": "3<50%"
}
}
--------------------------------------------------
// CONSOLE
The high frequency generated query is then slightly less restrictive
than with an `AND`.

View File

@ -7,16 +7,12 @@ filter. Maps to Lucene `ConstantScoreQuery`.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"constant_score" : {
"filter" : {
"term" : { "user" : "kimchy"}
},
"boost" : 1.2
}
"constant_score" : {
"filter" : {
"term" : { "user" : "kimchy"}
},
"boost" : 1.2
}
}
--------------------------------------------------
// CONSOLE

View File

@ -27,22 +27,18 @@ This query maps to Lucene `DisjunctionMaxQuery`.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"dis_max" : {
"tie_breaker" : 0.7,
"boost" : 1.2,
"queries" : [
{
"term" : { "age" : 34 }
},
{
"term" : { "age" : 35 }
}
]
}
"dis_max" : {
"tie_breaker" : 0.7,
"boost" : 1.2,
"queries" : [
{
"term" : { "age" : 34 }
},
{
"term" : { "age" : 35 }
}
]
}
}
--------------------------------------------------
// CONSOLE

View File

@ -5,14 +5,10 @@ Returns documents that have at least one non-`null` value in the original field:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"exists" : { "field" : "user" }
}
"exists" : { "field" : "user" }
}
--------------------------------------------------
// CONSOLE
For instance, these documents would all match the above query:
@ -81,20 +77,14 @@ clause as follows:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"must_not": {
"exists": {
"field": "user"
}
}
"bool": {
"must_not": {
"exists": {
"field": "user"
}
}
}
--------------------------------------------------
// CONSOLE
This query returns documents that have no value in the user field.

View File

@ -14,20 +14,13 @@ by the query.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"function_score": {
"query": {},
"boost": "5",
"random_score": {}, <1>
"boost_mode":"multiply"
}
}
"function_score": {
"query": {},
"boost": "boost for the whole query",
"FUNCTION": {}, <1>
"boost_mode":"(multiply|replace|...)"
}
--------------------------------------------------
// CONSOLE
<1> See <<score-functions>> for a list of supported functions.
Furthermore, several functions can be combined. In this case one can
@ -36,35 +29,30 @@ given filtering query
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"function_score": {
"query": {},
"boost": "5", <1>
"functions": [
{
"filter": {},
"random_score": {}, <2>
"weight": 23
},
{
"filter": {},
"weight": 42
}
],
"max_boost": 42,
"score_mode": "max",
"boost_mode": "multiply",
"min_score" : 42
"function_score": {
"query": {},
"boost": "boost for the whole query",
"functions": [
{
"filter": {},
"FUNCTION": {}, <1>
"weight": number
},
{
"FUNCTION": {} <1>
},
{
"filter": {},
"weight": number
}
}
],
"max_boost": number,
"score_mode": "(multiply|max|...)",
"boost_mode": "(multiply|replace|...)",
"min_score" : number
}
--------------------------------------------------
// CONSOLE
<1> Boost for the whole query.
<2> See <<score-functions>> for a list of supported functions.
<1> See <<score-functions>> for a list of supported functions.
NOTE: The scores produced by the filtering query of each function do not matter.
@ -471,36 +459,36 @@ the request would look like this:
[source,js]
--------------------------------------------------
GET /_search
GET _search
{
"query": {
"function_score": {
"functions": [
{
"gauss": {
"price": {
"origin": "0",
"scale": "20"
}
}
},
{
"gauss": {
"location": {
"origin": "11, 12",
"scale": "2km"
}
}
"query": {
"function_score": {
"functions": [
{
"gauss": {
"price": {
"origin": "0",
"scale": "20"
}
],
"query": {
"match": {
"properties": "balcony"
}
},
{
"gauss": {
"location": {
"origin": "11, 12",
"scale": "2km"
}
},
"score_mode": "multiply"
}
}
],
"query": {
"match": {
"properties": "balcony"
}
},
"score_mode": "multiply"
}
}
}
--------------------------------------------------
// CONSOLE

View File

@ -16,35 +16,27 @@ Here is a simple example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"fuzzy" : { "user" : "ki" }
}
"fuzzy" : { "user" : "ki" }
}
--------------------------------------------------
// CONSOLE
Or with more advanced settings:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"fuzzy" : {
"user" : {
"value" : "ki",
"boost" : 1.0,
"fuzziness" : 2,
"prefix_length" : 0,
"max_expansions": 100
}
"fuzzy" : {
"user" : {
"value" : "ki",
"boost" : 1.0,
"fuzziness" : 2,
"prefix_length" : 0,
"max_expansions": 100
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Parameters
@ -70,4 +62,3 @@ WARNING: This query can be very heavy if `prefix_length` is set to `0` and if
`max_expansions` is set to a high number. It could result in every term in the
index being examined!

View File

@ -6,24 +6,6 @@ bounding box. Assuming the following indexed document:
[source,js]
--------------------------------------------------
PUT /my_locations
{
"mappings": {
"location": {
"properties": {
"pin": {
"properties": {
"location": {
"type": "geo_point"
}
}
}
}
}
}
}
PUT /my_locations/location/1
{
"pin" : {
"location" : {
@ -33,32 +15,27 @@ PUT /my_locations/location/1
}
}
--------------------------------------------------
// CONSOLE
// TESTSETUP
Then the following simple query can be executed with a
`geo_bounding_box` filter:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : {
"lat" : 40.73,
"lon" : -74.1
},
"bottom_right" : {
"lat" : 40.01,
"lon" : -71.12
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : {
"lat" : 40.73,
"lon" : -74.1
},
"bottom_right" : {
"lat" : 40.01,
"lon" : -71.12
}
}
}
@ -66,7 +43,6 @@ GET /_search
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Query Options
@ -99,24 +75,21 @@ representation of the geo point, the filter can accept it as well:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : {
"lat" : 40.73,
"lon" : -74.1
},
"bottom_right" : {
"lat" : 40.01,
"lon" : -71.12
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : {
"lat" : 40.73,
"lon" : -74.1
},
"bottom_right" : {
"lat" : 40.01,
"lon" : -71.12
}
}
}
@ -124,7 +97,6 @@ GET /_search
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Lat Lon As Array
@ -134,26 +106,22 @@ conform with http://geojson.org/[GeoJSON].
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : [-74.1, 40.73],
"bottom_right" : [-71.12, 40.01]
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : [-74.1, 40.73],
"bottom_right" : [-71.12, 40.01]
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Lat Lon As String
@ -162,52 +130,44 @@ Format in `lat,lon`.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : "40.73, -74.1",
"bottom_right" : "40.01, -71.12"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Geohash
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : "dr5r9ydj2y73",
"bottom_right" : "drj7teegpus6"
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : "40.73, -74.1",
"bottom_right" : "40.01, -71.12"
}
}
}
}
}
--------------------------------------------------
[float]
===== Geohash
[source,js]
--------------------------------------------------
{
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : "dr5r9ydj2y73",
"bottom_right" : "drj7teegpus6"
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Vertices
@ -221,28 +181,24 @@ values separately.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top" : 40.73,
"left" : -74.1,
"bottom" : 40.01,
"right" : -71.12
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top" : 40.73,
"left" : -74.1,
"bottom" : 40.01,
"right" : -71.12
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
@ -271,33 +227,29 @@ are not supported. Here is an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : {
"lat" : 40.73,
"lon" : -74.1
},
"bottom_right" : {
"lat" : 40.10,
"lon" : -71.12
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : {
"lat" : 40.73,
"lon" : -74.1
},
"type" : "indexed"
}
"bottom_right" : {
"lat" : 40.10,
"lon" : -71.12
}
},
"type" : "indexed"
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Ignore Unmapped

View File

@ -2,29 +2,10 @@
=== Geo Distance Query
Filters documents that include only hits that exist within a specific
distance from a geo point. Assuming the following mapping and indexed
document:
distance from a geo point. Assuming the following indexed JSON:
[source,js]
--------------------------------------------------
PUT /my_locations
{
"mappings": {
"location": {
"properties": {
"pin": {
"properties": {
"location": {
"type": "geo_point"
}
}
}
}
}
}
}
PUT /my_locations/location/1
{
"pin" : {
"location" : {
@ -34,36 +15,29 @@ PUT /my_locations/location/1
}
}
--------------------------------------------------
// CONSOLE
// TESTSETUP
Then the following simple query can be executed with a `geo_distance`
filter:
[source,js]
--------------------------------------------------
GET /my_locations/location/_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "200km",
"pin.location" : {
"lat" : 40,
"lon" : -70
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "200km",
"pin.location" : {
"lat" : 40,
"lon" : -70
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Accepted Formats
@ -76,27 +50,23 @@ representation of the geo point, the filter can accept it as well:
[source,js]
--------------------------------------------------
GET /my_locations/location/_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : {
"lat" : 40,
"lon" : -70
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : {
"lat" : 40,
"lon" : -70
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Lat Lon As Array
@ -106,25 +76,20 @@ conform with http://geojson.org/[GeoJSON].
[source,js]
--------------------------------------------------
GET /my_locations/location/_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : [-70, 40]
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : [-70, 40]
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Lat Lon As String
@ -133,48 +98,40 @@ Format in `lat,lon`.
[source,js]
--------------------------------------------------
GET /my_locations/location/_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : "40,-70"
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : "40,-70"
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Geohash
[source,js]
--------------------------------------------------
GET /my_locations/location/_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : "drm3btev3e86"
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance" : {
"distance" : "12km",
"pin.location" : "drm3btev3e86"
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Options

View File

@ -5,28 +5,24 @@ Filters documents that exist within a range from a specific point:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance_range" : {
"from" : "200km",
"to" : "400km",
"pin.location" : {
"lat" : 40,
"lon" : -70
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_distance_range" : {
"from" : "200km",
"to" : "400km",
"pin.location" : {
"lat" : 40,
"lon" : -70
}
}
}
}
}
--------------------------------------------------
// CONSOLE
Supports the same point location parameter and query options as the
<<query-dsl-geo-distance-query,geo_distance>>

View File

@ -6,29 +6,25 @@ points. Here is an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
"bool" : {
"query" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
{"lat" : 40, "lon" : -70},
{"lat" : 30, "lon" : -80},
{"lat" : 20, "lon" : -90}
]
}
]
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Query Options
@ -57,29 +53,25 @@ conform with http://geojson.org/[GeoJSON].
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
[-70, 40],
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
[-70, 40],
[-80, 30],
[-90, 20]
]
}
[-90, 20]
]
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Lat Lon as String
@ -88,58 +80,50 @@ Format in `lat,lon`.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
"40, -70",
"30, -80",
"20, -90"
]
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
"40, -70",
"30, -80",
"20, -90"
]
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
===== Geohash
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
"drn5x1g8cu2y",
"30, -80",
"20, -90"
]
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_polygon" : {
"person.location" : {
"points" : [
"drn5x1g8cu2y",
"30, -80",
"20, -90"
]
}
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== geo_point Type

View File

@ -26,10 +26,10 @@ Given a document that looks like this:
--------------------------------------------------
{
"name": "Wind & Wetter, Berlin, Germany",
"location": {
"type": "Point",
"coordinates": [13.400544, 52.530286]
}
"location": {
"type": "Point",
"coordinates": [13.400544, 52.530286]
}
}
--------------------------------------------------
@ -38,7 +38,6 @@ The following query will find the point using Elasticsearch's
[source,js]
--------------------------------------------------
GET /_search
{
"query":{
"bool": {
@ -60,7 +59,6 @@ GET /_search
}
}
--------------------------------------------------
// CONSOLE
==== Pre-Indexed Shape
@ -83,30 +81,26 @@ shape:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"must": {
"match_all": {}
},
"filter": {
"geo_shape": {
"location": {
"indexed_shape": {
"id": "DEU",
"type": "countries",
"index": "shapes",
"path": "location"
}
}
"bool": {
"must": {
"match_all": {}
},
"filter": {
"geo_shape": {
"location": {
"indexed_shape": {
"id": "DEU",
"type": "countries",
"index": "shapes",
"path": "location"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
==== Spatial Relations

View File

@ -13,7 +13,6 @@ setting the `geohash_prefix` option:
[source,js]
--------------------------------------------------
PUT /my_index
{
"mappings" : {
"location": {
@ -29,8 +28,6 @@ PUT /my_index
}
}
--------------------------------------------------
// CONSOLE
// TESTSETUP
The geohash cell can be defined by all formats of `geo_points`. If such a cell is
defined by a latitude and longitude pair, the size of the cell needs to be
@ -45,28 +42,24 @@ next to the given cell.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geohash_cell": {
"pin": {
"lat": 13.4080,
"lon": 52.5186
},
"precision": 3,
"neighbors": true
}
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geohash_cell": {
"pin": {
"lat": 13.4080,
"lon": 52.5186
},
"precision": 3,
"neighbors": true
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Ignore Unmapped

View File

@ -7,21 +7,17 @@ an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"has_child" : {
"type" : "blog_tag",
"query" : {
"term" : {
"tag" : "something"
}
}
"has_child" : {
"type" : "blog_tag",
"query" : {
"term" : {
"tag" : "something"
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Scoring capabilities
@ -36,22 +32,18 @@ inside the `has_child` query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"has_child" : {
"type" : "blog_tag",
"score_mode" : "min",
"query" : {
"term" : {
"tag" : "something"
}
}
"has_child" : {
"type" : "blog_tag",
"score_mode" : "min",
"query" : {
"term" : {
"tag" : "something"
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Min/Max Children
@ -62,24 +54,20 @@ a match:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"has_child" : {
"type" : "blog_tag",
"score_mode" : "min",
"min_children": 2, <1>
"max_children": 10, <1>
"query" : {
"term" : {
"tag" : "something"
}
"has_child" : {
"type" : "blog_tag",
"score_mode" : "min",
"min_children": 2, <1>
"max_children": 10, <1>
"query" : {
"term" : {
"tag" : "something"
}
}
}
}
--------------------------------------------------
// CONSOLE
<1> Both `min_children` and `max_children` are optional.
The `min_children` and `max_children` parameters can be combined with

View File

@ -9,21 +9,17 @@ in the same manner as the `has_child` query.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"has_parent" : {
"parent_type" : "blog",
"query" : {
"term" : {
"tag" : "something"
}
}
"has_parent" : {
"parent_type" : "blog",
"query" : {
"term" : {
"tag" : "something"
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Scoring capabilities
@ -38,22 +34,18 @@ matching parent document. The score mode can be specified with the
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"has_parent" : {
"parent_type" : "blog",
"score" : true,
"query" : {
"term" : {
"tag" : "something"
}
}
"has_parent" : {
"parent_type" : "blog",
"score" : true,
"query" : {
"term" : {
"tag" : "something"
}
}
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Ignore Unmapped

View File

@ -6,17 +6,13 @@ uses the <<mapping-uid-field,_uid>> field.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"ids" : {
"type" : "my_type",
"values" : ["1", "4", "100"]
}
"ids" : {
"type" : "my_type",
"values" : ["1", "4", "100"]
}
}
--------------------------------------------------
// CONSOLE
The `type` is optional and can be omitted; it can also accept an array
of values, as shown in the sketch below. If no type is specified, all types defined in the index mapping are tried.
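For instance, a sketch passing an array of types (type and index names hypothetical):

[source,js]
--------------------------------------------------
GET /_search
{
    "ids" : {
        "type" : ["my_type", "my_other_type"],
        "values" : ["1", "4", "100"]
    }
}
--------------------------------------------------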

View File

@ -9,18 +9,18 @@ on the list, the alternative `no_match_query` is executed.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"indices" : {
"indices" : ["index1", "index2"],
"query" : { "term" : { "tag" : "wow" } },
"no_match_query" : { "term" : { "tag" : "kow" } }
"indices" : {
"indices" : ["index1", "index2"],
"query" : {
"term" : { "tag" : "wow" }
},
"no_match_query" : {
"term" : { "tag" : "kow" }
}
}
}
--------------------------------------------------
// CONSOLE
You can use the `index` field to provide a single index.
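For instance, a minimal sketch targeting a single index (index and field names hypothetical):

[source,js]
--------------------------------------------------
GET /_search
{
    "indices" : {
        "index" : "index1",
        "query" : {
            "term" : { "tag" : "wow" }
        },
        "no_match_query" : {
            "term" : { "tag" : "kow" }
        }
    }
}
--------------------------------------------------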

View File

@ -6,27 +6,15 @@ of `1.0`.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match_all": {}
}
}
{ "match_all": {} }
--------------------------------------------------
// CONSOLE
The `_score` can be changed with the `boost` parameter:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match_all": { "boost" : 1.2 }
}
}
{ "match_all": { "boost" : 1.2 }}
--------------------------------------------------
// CONSOLE
[[query-dsl-match-none-query]]
[float]
@ -36,11 +24,5 @@ This is the inverse of the `match_all` query; it matches no documents.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match_none": {}
}
}
{ "match_none": {} }
--------------------------------------------------
// CONSOLE
View File
@ -6,16 +6,12 @@ allows for prefix matches on the last term in the text. For example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match_phrase_prefix" : {
"message" : "quick brown f"
}
"match_phrase_prefix" : {
"message" : "quick brown f"
}
}
--------------------------------------------------
// CONSOLE
It accepts the same parameters as the phrase type. In addition, it also
accepts a `max_expansions` parameter (default `50`) that controls how
@ -25,19 +21,15 @@ example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match_phrase_prefix" : {
"message" : {
"query" : "quick brown f",
"max_expansions" : 10
}
"match_phrase_prefix" : {
"message" : {
"query" : "quick brown f",
"max_expansions" : 10
}
}
}
--------------------------------------------------
// CONSOLE
[IMPORTANT]
===================================================
@ -61,4 +53,4 @@ For better solutions for _search-as-you-type_ see the
<<search-suggesters-completion,completion suggester>> and
{guide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type].
===================================================
===================================================
View File
@ -6,16 +6,12 @@ out of the analyzed text. For example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match_phrase" : {
"message" : "this is a test"
}
"match_phrase" : {
"message" : "this is a test"
}
}
--------------------------------------------------
// CONSOLE
A phrase query matches terms up to a configurable `slop`
(which defaults to 0) in any order. Transposed terms have a slop of 2.
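For example, a minimal sketch that tolerates up to two positions of movement between the terms:
[source,js]
--------------------------------------------------
GET /_search
{
    "query": {
        "match_phrase" : {
            "message" : {
                "query" : "this is a test",
                "slop" : 2
            }
        }
    }
}
--------------------------------------------------
// CONSOLE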
@ -26,16 +22,12 @@ definition, or the default search analyzer, for example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match_phrase" : {
"message" : {
"query" : "this is a test",
"analyzer" : "my_analyzer"
}
"match_phrase" : {
"message" : {
"query" : "this is a test",
"analyzer" : "my_analyzer"
}
}
}
--------------------------------------------------
// CONSOLE
View File
@ -7,16 +7,12 @@ them, and constructs a query. For example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match" : {
"message" : "this is a test"
}
"match" : {
"message" : "this is a test"
}
}
--------------------------------------------------
// CONSOLE
Note that `message` is the name of a field; you can substitute the name of
any field (including `_all`) instead.
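For instance, a minimal sketch of the same query run against the special `_all` field:
[source,js]
--------------------------------------------------
GET /_search
{
    "query": {
        "match" : {
            "_all" : "this is a test"
        }
    }
}
--------------------------------------------------
// CONSOLE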
@ -61,19 +57,15 @@ change in structure, `message` is the field name):
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match" : {
"message" : {
"query" : "this is a test",
"operator" : "and"
}
"match" : {
"message" : {
"query" : "this is a test",
"operator" : "and"
}
}
}
--------------------------------------------------
// CONSOLE
[[query-dsl-match-query-zero]]
===== Zero terms query
@ -84,20 +76,16 @@ change that the `zero_terms_query` option can be used, which accepts
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match" : {
"message" : {
"query" : "to be or not to be",
"operator" : "and",
"zero_terms_query": "all"
}
"match" : {
"message" : {
"query" : "to be or not to be",
"operator" : "and",
"zero_terms_query": "all"
}
}
}
--------------------------------------------------
// CONSOLE
[[query-dsl-match-query-cutoff]]
===== Cutoff frequency
@ -125,19 +113,16 @@ Here is an example showing a query composed of stopwords exclusively:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"match" : {
"message" : {
"query" : "to be or not to be",
"cutoff_frequency" : 0.001
}
"match" : {
"message" : {
"query" : "to be or not to be",
"cutoff_frequency" : 0.001
}
}
}
--------------------------------------------------
// CONSOLE
IMPORTANT: The `cutoff_frequency` option operates on a per-shard level. This means
that when trying it out on test indexes with low document numbers you
View File
@ -15,19 +15,15 @@ fields, limiting the number of selected terms to 12.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"more_like_this" : {
"fields" : ["title", "description"],
"like" : "Once upon a time",
"min_term_freq" : 1,
"max_query_terms" : 12
}
"more_like_this" : {
"fields" : ["title", "description"],
"like" : "Once upon a time",
"min_term_freq" : 1,
"max_query_terms" : 12
}
}
--------------------------------------------------
// CONSOLE
A more complicated use case consists of mixing texts with documents already
existing in the index. In this case, the syntax to specify a document is
@ -35,31 +31,27 @@ similar to the one used in the <<docs-multi-get,Multi GET API>>.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"more_like_this" : {
"fields" : ["title", "description"],
"like" : [
{
"_index" : "imdb",
"_type" : "movies",
"_id" : "1"
},
{
"_index" : "imdb",
"_type" : "movies",
"_id" : "2"
},
"and potentially some more text here as well"
],
"min_term_freq" : 1,
"max_query_terms" : 12
}
"more_like_this" : {
"fields" : ["title", "description"],
"like" : [
{
"_index" : "imdb",
"_type" : "movies",
"_id" : "1"
},
{
"_index" : "imdb",
"_type" : "movies",
"_id" : "2"
},
"and potentially some more text here as well"
],
"min_term_freq" : 1,
"max_query_terms" : 12
}
}
--------------------------------------------------
// CONSOLE
Finally, users can mix some texts and a chosen set of documents, and also provide
documents not necessarily present in the index. To provide documents not
@ -67,36 +59,32 @@ present in the index, the syntax is similar to <<docs-termvectors-artificial-doc
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"more_like_this" : {
"fields" : ["name.first", "name.last"],
"like" : [
{
"_index" : "marvel",
"_type" : "quotes",
"doc" : {
"name": {
"first": "Ben",
"last": "Grimm"
},
"tweet": "You got no idea what I'd... what I'd give to be invisible."
}
},
{
"_index" : "marvel",
"_type" : "quotes",
"_id" : "2"
}
],
"min_term_freq" : 1,
"max_query_terms" : 12
"more_like_this" : {
"fields" : ["name.first", "name.last"],
"like" : [
{
"_index" : "marvel",
"_type" : "quotes",
"doc" : {
"name": {
"first": "Ben",
"last": "Grimm"
},
"tweet": "You got no idea what I'd... what I'd give to be invisible."
}
},
{
"_index" : "marvel",
"_type" : "quotes",
"_id" : "2"
}
],
"min_term_freq" : 1,
"max_query_terms" : 12
}
}
--------------------------------------------------
// CONSOLE
==== How it Works
@ -123,34 +111,32 @@ default, but there will be no speed up on analysis for these fields.
[source,js]
--------------------------------------------------
PUT /imdb
{
"mappings": {
"movies": {
"properties": {
"title": {
"type": "text",
"term_vector": "yes"
},
"description": {
"type": "text"
},
"tags": {
"type": "text",
"fields" : {
"raw": {
"type" : "text",
"analyzer": "keyword",
"term_vector" : "yes"
}
}
}
curl -s -XPUT 'http://localhost:9200/imdb/' -d '{
"mappings": {
"movies": {
"properties": {
"title": {
"type": "text",
"term_vector": "yes"
},
"description": {
"type": "text"
},
"tags": {
"type": "text",
"fields" : {
"raw": {
"type" : "text",
"analyzer": "keyword",
"term_vector" : "yes"
}
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
==== Parameters
View File
@ -6,17 +6,13 @@ to allow multi-field queries:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "this is a test", <1>
"fields": [ "subject", "message" ] <2>
}
"multi_match" : {
"query": "this is a test", <1>
"fields": [ "subject", "message" ] <2>
}
}
--------------------------------------------------
// CONSOLE
<1> The query string.
<2> The fields to be queried.
@ -27,35 +23,26 @@ Fields can be specified with wildcards, eg:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "Will Smith",
"fields": [ "title", "*_name" ] <1>
}
"multi_match" : {
"query": "Will Smith",
"fields": [ "title", "*_name" ] <1>
}
}
--------------------------------------------------
// CONSOLE
<1> Query the `title`, `first_name` and `last_name` fields.
Individual fields can be boosted with the caret (`^`) notation:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query" : "this is a test",
"fields" : [ "subject^3", "message" ] <1>
}
"multi_match" : {
"query" : "this is a test",
"fields" : [ "subject^3", "message" ] <1>
}
}
--------------------------------------------------
// CONSOLE
<1> The `subject` field is three times as important as the `message` field.
[[multi-match-types]]
@ -95,38 +82,30 @@ find the single best matching field. For instance, this query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "brown fox",
"type": "best_fields",
"fields": [ "subject", "message" ],
"tie_breaker": 0.3
}
"multi_match" : {
"query": "brown fox",
"type": "best_fields",
"fields": [ "subject", "message" ],
"tie_breaker": 0.3
}
}
--------------------------------------------------
// CONSOLE
would be executed as:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"dis_max": {
"queries": [
{ "match": { "subject": "brown fox" }},
{ "match": { "message": "brown fox" }}
],
"tie_breaker": 0.3
}
"dis_max": {
"queries": [
{ "match": { "subject": "brown fox" }},
{ "match": { "message": "brown fox" }}
],
"tie_breaker": 0.3
}
}
--------------------------------------------------
// CONSOLE
Normally the `best_fields` type uses the score of the *single* best matching
field, but if `tie_breaker` is specified, then it calculates the score as
@ -153,20 +132,15 @@ Take this query for example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "Will Smith",
"type": "best_fields",
"fields": [ "first_name", "last_name" ],
"operator": "and" <1>
}
"multi_match" : {
"query": "Will Smith",
"type": "best_fields",
"fields": [ "first_name", "last_name" ],
"operator": "and" <1>
}
}
--------------------------------------------------
// CONSOLE
<1> All terms must be present.
This query is executed as:
@ -196,37 +170,29 @@ This query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "quick brown fox",
"type": "most_fields",
"fields": [ "title", "title.original", "title.shingles" ]
}
"multi_match" : {
"query": "quick brown fox",
"type": "most_fields",
"fields": [ "title", "title.original", "title.shingles" ]
}
}
--------------------------------------------------
// CONSOLE
would be executed as:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"should": [
{ "match": { "title": "quick brown fox" }},
{ "match": { "title.original": "quick brown fox" }},
{ "match": { "title.shingles": "quick brown fox" }}
]
}
"bool": {
"should": [
{ "match": { "title": "quick brown fox" }},
{ "match": { "title.original": "quick brown fox" }},
{ "match": { "title.shingles": "quick brown fox" }}
]
}
}
--------------------------------------------------
// CONSOLE
The score from each `match` clause is added together, then divided by the
number of `match` clauses.
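For example, with hypothetical clause scores of 0.9, 0.6 and 0.3, the combined
score would be (0.9 + 0.6 + 0.3) / 3 = 0.6.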
@ -246,36 +212,28 @@ but they use a `match_phrase` or `match_phrase_prefix` query instead of a
This query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "quick brown f",
"type": "phrase_prefix",
"fields": [ "subject", "message" ]
}
"multi_match" : {
"query": "quick brown f",
"type": "phrase_prefix",
"fields": [ "subject", "message" ]
}
}
--------------------------------------------------
// CONSOLE
would be executed as:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"dis_max": {
"queries": [
{ "match_phrase_prefix": { "subject": "quick brown f" }},
{ "match_phrase_prefix": { "message": "quick brown f" }}
]
}
"dis_max": {
"queries": [
{ "match_phrase_prefix": { "subject": "quick brown f" }},
{ "match_phrase_prefix": { "message": "quick brown f" }}
]
}
}
--------------------------------------------------
// CONSOLE
It also accepts `analyzer`, `boost`, `slop` and `zero_terms_query` as explained
in <<query-dsl-match-query>>. Type `phrase_prefix` additionally accepts
@ -330,19 +288,15 @@ A query like:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "Will Smith",
"type": "cross_fields",
"fields": [ "first_name", "last_name" ],
"operator": "and"
}
"multi_match" : {
"query": "Will Smith",
"type": "cross_fields",
"fields": [ "first_name", "last_name" ],
"operator": "and"
}
}
--------------------------------------------------
// CONSOLE
is executed as:
@ -390,21 +344,17 @@ both use an `edge_ngram` analyzer, this query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "Jon",
"type": "cross_fields",
"fields": [
"multi_match" : {
"query": "Jon",
"type": "cross_fields",
"fields": [
"first", "first.edge",
"last", "last.edge"
]
}
]
}
}
--------------------------------------------------
// CONSOLE
would be executed as:
@ -429,33 +379,28 @@ parameter to just one of them:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"bool": {
"should": [
{
"multi_match" : {
"query": "Will Smith",
"type": "cross_fields",
"fields": [ "first", "last" ],
"minimum_should_match": "50%" <1>
}
},
{
"multi_match" : {
"query": "Will Smith",
"type": "cross_fields",
"fields": [ "*.edge" ]
}
}
]
"should": [
{
"multi_match" : {
"query": "Will Smith",
"type": "cross_fields",
"fields": [ "first", "last" ],
"minimum_should_match": "50%" <1>
}
},
{
"multi_match" : {
"query": "Will Smith",
"type": "cross_fields",
"fields": [ "*.edge" ]
}
}
]
}
}
}
--------------------------------------------------
// CONSOLE
<1> Either `will` or `smith` must be present in either of the `first`
or `last` fields.
@ -464,20 +409,15 @@ parameter in the query.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"multi_match" : {
"query": "Jon",
"type": "cross_fields",
"analyzer": "standard", <1>
"fields": [ "first", "last", "*.edge" ]
}
"multi_match" : {
"query": "Jon",
"type": "cross_fields",
"analyzer": "standard", <1>
"fields": [ "first", "last", "*.edge" ]
}
}
--------------------------------------------------
// CONSOLE
<1> Use the `standard` analyzer for all fields.
which will be executed as:
View File
@ -10,45 +10,40 @@ will work with:
[source,js]
--------------------------------------------------
PUT /my_index
{
"mappings": {
"type1" : {
"properties" : {
"obj1" : {
"type" : "nested"
}
"type1" : {
"properties" : {
"obj1" : {
"type" : "nested"
}
}
}
}
--------------------------------------------------
// CONSOLE
// TESTSETUP
And here is a sample nested query usage:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"nested" : {
"path" : "obj1",
"score_mode" : "avg",
"query" : {
"bool" : {
"must" : [
{ "match" : {"obj1.name" : "blue"} },
{ "range" : {"obj1.count" : {"gt" : 5}} }
]
}
"nested" : {
"path" : "obj1",
"score_mode" : "avg",
"query" : {
"bool" : {
"must" : [
{
"match" : {"obj1.name" : "blue"}
},
{
"range" : {"obj1.count" : {"gt" : 5}}
}
]
}
}
}
}
--------------------------------------------------
// CONSOLE
The query `path` points to the nested object path, and the `query`
includes the query that will run on the nested docs matching the
View File
@ -3,11 +3,11 @@
added[5.0.0]
The `parent_id` query can be used to find child documents which belong to a
particular parent. Given the following mapping definition:
The `parent_id` query can be used to find child documents which belong to a particular parent.
Given the following mapping definition:
[source,js]
--------------------------------------------------
--------------------------------------------
PUT /my_index
{
"mappings": {
@ -28,7 +28,7 @@ PUT /my_index
}
}
}
--------------------------------------------------
------------------------------------------
// CONSOLE
// TESTSETUP
@ -52,21 +52,17 @@ better as it does not need to do a join:
[source,js]
--------------------------------------------------
GET /my_index/_search
{
"has_parent": {
"type": "blog",
"query": {
"has_parent": {
"type": "blog_post",
"query": {
"term": {
"_id": "1"
}
}
}
"term": {
"_id": "1"
}
}
}
}
--------------------------------------------------
// CONSOLE
==== Parameters
View File
@ -13,27 +13,26 @@ Create an index with two mappings:
[source,js]
--------------------------------------------------
PUT /my-index
curl -XPUT "http://localhost:9200/my-index" -d'
{
"mappings": {
"doctype": {
"properties": {
"message": {
"type": "string"
}
}
},
"queries": {
"properties": {
"query": {
"type": "percolator"
}
}
"mappings": {
"doctype": {
"properties": {
"message": {
"type": "string"
}
}
},
"queries": {
"properties": {
"query": {
"type": "percolator"
}
}
}
}
}
}'
--------------------------------------------------
// CONSOLE
The `doctype` mapping is the mapping used to preprocess
the document defined in the `percolator` query before it
@ -51,24 +50,20 @@ Register a query in the percolator:
[source,js]
--------------------------------------------------
PUT /my-index/queries/1
{
curl -XPUT 'localhost:9200/my-index/queries/1' -d '{
"query" : {
"match" : {
"message" : "bonsai tree"
}
}
}
}'
--------------------------------------------------
// CONSOLE
// TEST[continued]
Match a document to the registered percolator queries:
[source,js]
--------------------------------------------------
GET /my-index/_search
{
curl -XGET 'localhost:9200/my-index/_search' -d '{
"query" : {
"percolate" : {
"field" : "query",
@ -78,10 +73,8 @@ GET /my-index/_search
}
}
}
}
}'
--------------------------------------------------
// CONSOLE
// TEST[continued]
The above request will yield the following response:
@ -158,13 +151,12 @@ Index the document we want to percolate:
[source,js]
--------------------------------------------------
PUT /my-index/message/1
curl -XPUT "http://localhost:9200/my-index/message/1" -d'
{
"message" : "A new bonsai tree in the office"
}
}'
--------------------------------------------------
// CONSOLE
// TEST[continued]
Index response:
[source,js]
@ -187,7 +179,7 @@ Percolating an existing document, using the index response as basis to build to
[source,js]
--------------------------------------------------
GET /my-index/_search
curl -XGET "http://localhost:9200/my-index/_search" -d'
{
"query" : {
"percolate" : {
@ -199,10 +191,8 @@ GET /my-index/_search
"version" : 1 <1>
}
}
}
}'
--------------------------------------------------
// CONSOLE
// TEST[continued]
<1> The version is optional, but useful in certain cases. We can then ensure that we are trying to percolate
the document we have just indexed. A change may be made after we have indexed, and if that is the
@ -226,39 +216,35 @@ Save a query:
[source,js]
--------------------------------------------------
PUT /my-index/queries/1
curl -XPUT "http://localhost:9200/my-index/queries/1" -d'
{
"query" : {
"match" : {
"message" : "brown fox"
}
}
}
}'
--------------------------------------------------
// CONSOLE
// TEST[continued]
Save another query:
[source,js]
--------------------------------------------------
PUT /my-index/queries/2
curl -XPUT "http://localhost:9200/my-index/queries/2" -d'
{
"query" : {
"match" : {
"message" : "lazy dog"
}
}
}
}'
--------------------------------------------------
// CONSOLE
// TEST[continued]
Execute a search request with the `percolate` query and highlighting enabled:
[source,js]
--------------------------------------------------
GET /my-index/_search
curl -XGET "http://localhost:9200/my-index/_search" -d'
{
"query" : {
"percolate" : {
@ -274,10 +260,8 @@ GET /my-index/_search
"message": {}
}
}
}
}'
--------------------------------------------------
// CONSOLE
// TEST[continued]
This will yield the following response.
@ -352,4 +336,4 @@ that are registered to the index that the search request is targeted for, are go
in-memory index. This happens on each shard the search request needs to execute.
By using `routing` or additional queries, the number of percolator queries that need to be executed can be reduced and thus
the time the search API needs to run can be decreased.
the time the search API needs to run can be decreased.
View File
@ -8,37 +8,28 @@ that starts with `ki`:
[source,js]
--------------------------------------------------
GET /_search
{ "query": {
{
"prefix" : { "user" : "ki" }
}
}
--------------------------------------------------
// CONSOLE
A boost can also be associated with the query:
[source,js]
--------------------------------------------------
GET /_search
{ "query": {
{
"prefix" : { "user" : { "value" : "ki", "boost" : 2.0 } }
}
}
--------------------------------------------------
// CONSOLE
Or:
[source,js]
--------------------------------------------------
GET /_search
{ "query": {
{
"prefix" : { "user" : { "prefix" : "ki", "boost" : 2.0 } }
}
}
--------------------------------------------------
// CONSOLE
This multi term query allows you to control how it gets rewritten using the
<<query-dsl-multi-term-rewrite,rewrite>>
View File
@ -6,17 +6,13 @@ an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"query_string" : {
"default_field" : "content",
"query" : "this AND that OR thus"
}
"query_string" : {
"default_field" : "content",
"query" : "this AND that OR thus"
}
}
--------------------------------------------------
// CONSOLE
The `query_string` top level parameters include:
@ -117,33 +113,25 @@ For example, the following query
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"query_string" : {
"fields" : ["content", "name"],
"query" : "this AND that"
}
"query_string" : {
"fields" : ["content", "name"],
"query" : "this AND that"
}
}
--------------------------------------------------
// CONSOLE
matches the same words as
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"query_string": {
"query": "(content:this OR name:this) AND (content:that OR name:that)"
}
"query_string": {
"query": "(content:this OR name:this) AND (content:that OR name:that)"
}
}
--------------------------------------------------
// CONSOLE
Since several queries are generated from the individual search terms,
combining them can be automatically done using either a `dis_max` query or a
@ -152,18 +140,14 @@ notation):
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"query_string" : {
"fields" : ["content", "name^5"],
"query" : "this AND that OR thus",
"use_dis_max" : true
}
"query_string" : {
"fields" : ["content", "name^5"],
"query" : "this AND that OR thus",
"use_dis_max" : true
}
}
--------------------------------------------------
// CONSOLE
Simple wildcard can also be used to search "within" specific inner
elements of the document. For example, if we have a `city` object with
@ -172,18 +156,14 @@ search on all "city" fields:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"query_string" : {
"fields" : ["city.*"],
"query" : "this AND that OR thus",
"use_dis_max" : true
}
"query_string" : {
"fields" : ["city.*"],
"query" : "this AND that OR thus",
"use_dis_max" : true
}
}
--------------------------------------------------
// CONSOLE
Another option is to provide the wildcard fields search in the query
string itself (properly escaping the `*` sign), for example:
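A sketch of that form (the backslash escaping the `*` must itself be escaped inside the JSON string):
[source,js]
--------------------------------------------------
GET /_search
{
    "query": {
        "query_string" : {
            "query" : "city.\\*:this AND that OR thus"
        }
    }
}
--------------------------------------------------
// CONSOLE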
@ -208,17 +188,13 @@ introduced fields included). For example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"query_string" : {
"fields" : ["content", "name.*^5"],
"query" : "this AND that OR thus",
"use_dis_max" : true
}
"query_string" : {
"fields" : ["content", "name.*^5"],
"query" : "this AND that OR thus",
"use_dis_max" : true
}
}
--------------------------------------------------
// CONSOLE
include::query-string-syntax.asciidoc[]
View File
@ -47,7 +47,7 @@ conditions are met:
[source,js]
------------------------------------
GET /_search
GET _search
{
"query": { <1>
"bool": { <2>
@ -63,7 +63,6 @@ GET /_search
}
}
------------------------------------
// CONSOLE
<1> The `query` parameter indicates query context.
<2> The `bool` and two `match` clauses are used in query context,
which means that they are used to score how well each document
View File
@ -9,20 +9,16 @@ a `NumericRangeQuery`. The following example returns all documents where
[source,js]
--------------------------------------------------
GET _search
{
"query": {
"range" : {
"age" : {
"gte" : 10,
"lte" : 20,
"boost" : 2.0
}
"range" : {
"age" : {
"gte" : 10,
"lte" : 20,
"boost" : 2.0
}
}
}
--------------------------------------------------
// CONSOLE
The `range` query accepts the following parameters:
@ -42,19 +38,15 @@ specified using <<date-math>>:
[source,js]
--------------------------------------------------
GET _search
{
"query": {
"range" : {
"date" : {
"gte" : "now-1d/d",
"lt" : "now/d"
}
"range" : {
"date" : {
"gte" : "now-1d/d",
"lt" : "now/d"
}
}
}
--------------------------------------------------
// CONSOLE
===== Date math and rounding
@ -94,20 +86,16 @@ passing the `format` parameter to the `range` query:
[source,js]
--------------------------------------------------
GET _search
{
"query": {
"range" : {
"born" : {
"gte": "01/01/2012",
"lte": "2013",
"format": "dd/MM/yyyy||yyyy"
}
"range" : {
"born" : {
"gte": "01/01/2012",
"lte": "2013",
"format": "dd/MM/yyyy||yyyy"
}
}
}
--------------------------------------------------
// CONSOLE
===== Time zone in range queries
@ -117,19 +105,15 @@ accepts it), or it can be specified as the `time_zone` parameter:
[source,js]
--------------------------------------------------
GET _search
{
"query": {
"range" : {
"timestamp" : {
"gte": "2015-01-01 00:00:00", <1>
"lte": "now", <2>
"time_zone": "+01:00"
}
"range" : {
"timestamp" : {
"gte": "2015-01-01 00:00:00", <1>
"lte": "now", <2>
"time_zone": "+01:00"
}
}
}
--------------------------------------------------
// CONSOLE
<1> This date will be converted to `2014-12-31T23:00:00 UTC`.
<2> `now` is not affected by the `time_zone` parameter (dates must be stored as UTC).
View File
@ -15,52 +15,40 @@ matchers like `.*?+` will mostly lower performance.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"regexp":{
"name.first": "s.*y"
}
"regexp":{
"name.first": "s.*y"
}
}
--------------------------------------------------
// CONSOLE
Boosting is also supported
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"regexp":{
"name.first":{
"value":"s.*y",
"boost":1.2
}
"regexp":{
"name.first":{
"value":"s.*y",
"boost":1.2
}
}
}
--------------------------------------------------
// CONSOLE
You can also use special flags
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"regexp":{
"name.first": {
"value": "s.*y",
"flags" : "INTERSECTION|COMPLEMENT|EMPTY"
}
"regexp":{
"name.first": {
"value": "s.*y",
"flags" : "INTERSECTION|COMPLEMENT|EMPTY"
}
}
}
--------------------------------------------------
// CONSOLE
Possible flags are `ALL` (default), `ANYSTRING`, `COMPLEMENT`,
`EMPTY`, `INTERSECTION`, `INTERVAL`, or `NONE`. Please check the
@ -76,19 +64,16 @@ this limit to allow more complex regular expressions to execute.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"regexp":{
"name.first": {
"value": "s.*y",
"flags" : "INTERSECTION|COMPLEMENT|EMPTY",
"max_determinized_states": 20000
}
"regexp":{
"name.first": {
"value": "s.*y",
"flags" : "INTERSECTION|COMPLEMENT|EMPTY",
"max_determinized_states": 20000
}
}
}
--------------------------------------------------
// CONSOLE
include::regexp-syntax.asciidoc[]
View File
@ -7,20 +7,17 @@ context, for example:
[source,js]
----------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"script" : {
"script" : "doc['num1'].value > 1"
}
}
"bool" : {
"must" : {
...
},
"filter" : {
"script" : {
"script" : "doc['num1'].value > 1"
}
}
}
----------------------------------------------
// CONSOLE
[float]
==== Custom Parameters
@ -31,23 +28,20 @@ to use the ability to pass parameters to the script itself, for example:
[source,js]
----------------------------------------------
GET /_search
{
"query": {
"bool" : {
"must" : {
"script" : {
"script" : {
"inline" : "doc['num1'].value > param1",
"params" : {
"param1" : 5
}
}
"bool" : {
"must" : {
...
},
"filter" : {
"script" : {
"script" : {
"inline" : "doc['num1'].value > param1"
"params" : {
"param1" : 5
}
}
}
}
}
----------------------------------------------
// CONSOLE
View File
@ -8,19 +8,15 @@ an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"simple_query_string" : {
"query": "\"fried eggs\" +(eggplant | potato) -frittata",
"analyzer": "snowball",
"fields": ["body^5","_all"],
"default_operator": "and"
}
}
}
--------------------------------------------------
// CONSOLE
The `simple_query_string` top level parameters include:
@ -98,17 +94,13 @@ introduced fields included). For example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"simple_query_string" : {
"fields" : ["content", "name.*^5"],
"query" : "foo bar baz"
}
"simple_query_string" : {
"fields" : ["content", "name.*^5"],
"query" : "foo bar baz"
}
}
--------------------------------------------------
// CONSOLE
[float]
==== Flags
@ -118,17 +110,13 @@ should be enabled. It is specified as a `|`-delimited string with the
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"simple_query_string" : {
"query" : "foo | bar + baz*",
"flags" : "OR|AND|PREFIX"
}
"simple_query_string" : {
"query" : "foo | bar + baz*",
"flags" : "OR|AND|PREFIX"
}
}
--------------------------------------------------
// CONSOLE
The available flags are: `ALL`, `NONE`, `AND`, `OR`, `NOT`, `PREFIX`, `PHRASE`,
`PRECEDENCE`, `ESCAPE`, `WHITESPACE`, `FUZZY`, `NEAR`, and `SLOP`.
View File
@ -6,28 +6,24 @@ query maps to Lucene `SpanContainingQuery`. Here is an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_containing" : {
"little" : {
"span_term" : { "field1" : "foo" }
},
"big" : {
"span_near" : {
"clauses" : [
{ "span_term" : { "field1" : "bar" } },
{ "span_term" : { "field1" : "baz" } }
],
"slop" : 5,
"in_order" : true
}
"span_containing" : {
"little" : {
"span_term" : { "field1" : "foo" }
},
"big" : {
"span_near" : {
"clauses" : [
{ "span_term" : { "field1" : "bar" } },
{ "span_term" : { "field1" : "baz" } }
],
"slop" : 5,
"in_order" : true
}
}
}
}
--------------------------------------------------
// CONSOLE
The `big` and `little` clauses can be any span type query. Matching
spans from `big` that contain matches from `little` are returned.
View File
@ -6,19 +6,15 @@ to Lucene `SpanFirstQuery`. Here is an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_first" : {
"match" : {
"span_term" : { "user" : "kimchy" }
},
"end" : 3
}
"span_first" : {
"match" : {
"span_term" : { "user" : "kimchy" }
},
"end" : 3
}
}
--------------------------------------------------
// CONSOLE
The `match` clause can be any other span type query. The `end` controls
the maximum end position permitted in a match.
View File
@ -7,32 +7,24 @@ it can be nested. Example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_multi":{
"match":{
"prefix" : { "user" : { "value" : "ki" } }
}
"span_multi":{
"match":{
"prefix" : { "user" : { "value" : "ki" } }
}
}
}
--------------------------------------------------
// CONSOLE
A boost can also be associated with the query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_multi":{
"match":{
"prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } }
}
"span_multi":{
"match":{
"prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } }
}
}
}
--------------------------------------------------
// CONSOLE
View File
@ -8,22 +8,18 @@ matches are required to be in-order. The span near query maps to Lucene
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_near" : {
"clauses" : [
{ "span_term" : { "field" : "value1" } },
{ "span_term" : { "field" : "value2" } },
{ "span_term" : { "field" : "value3" } }
],
"slop" : 12,
"in_order" : false
}
"span_near" : {
"clauses" : [
{ "span_term" : { "field" : "value1" } },
{ "span_term" : { "field" : "value2" } },
{ "span_term" : { "field" : "value3" } }
],
"slop" : 12,
"in_order" : false
}
}
--------------------------------------------------
// CONSOLE
The `clauses` element is a list of one or more other span type queries
and the `slop` controls the maximum number of intervening unmatched
View File
@ -6,28 +6,24 @@ query maps to Lucene `SpanNotQuery`. Here is an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_not" : {
"include" : {
"span_term" : { "field1" : "hoya" }
},
"exclude" : {
"span_near" : {
"clauses" : [
{ "span_term" : { "field1" : "la" } },
{ "span_term" : { "field1" : "hoya" } }
],
"slop" : 0,
"in_order" : true
}
"span_not" : {
"include" : {
"span_term" : { "field1" : "hoya" }
},
"exclude" : {
"span_near" : {
"clauses" : [
{ "span_term" : { "field1" : "la" } },
{ "span_term" : { "field1" : "hoya" } }
],
"slop" : 0,
"in_order" : true
}
}
}
}
--------------------------------------------------
// CONSOLE
The `include` and `exclude` clauses can be any span type query. The
`include` clause is the span query whose matches are filtered, and the
View File
@ -6,19 +6,15 @@ Matches the union of its span clauses. The span or query maps to Lucene
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_or" : {
"clauses" : [
{ "span_term" : { "field" : "value1" } },
{ "span_term" : { "field" : "value2" } },
{ "span_term" : { "field" : "value3" } }
]
}
"span_or" : {
"clauses" : [
{ "span_term" : { "field" : "value1" } },
{ "span_term" : { "field" : "value2" } },
{ "span_term" : { "field" : "value3" } }
]
}
}
--------------------------------------------------
// CONSOLE
The `clauses` element is a list of one or more other span type queries.
View File
@ -6,37 +6,25 @@ Matches spans containing a term. The span term query maps to Lucene
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_term" : { "user" : "kimchy" }
}
"span_term" : { "user" : "kimchy" }
}
--------------------------------------------------
// CONSOLE
A boost can also be associated with the query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_term" : { "user" : { "value" : "kimchy", "boost" : 2.0 } }
}
"span_term" : { "user" : { "value" : "kimchy", "boost" : 2.0 } }
}
--------------------------------------------------
// CONSOLE
Or:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_term" : { "user" : { "term" : "kimchy", "boost" : 2.0 } }
}
"span_term" : { "user" : { "term" : "kimchy", "boost" : 2.0 } }
}
--------------------------------------------------
// CONSOLE
View File
@ -6,28 +6,24 @@ query maps to Lucene `SpanWithinQuery`. Here is an example:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"span_within" : {
"little" : {
"span_term" : { "field1" : "foo" }
},
"big" : {
"span_near" : {
"clauses" : [
{ "span_term" : { "field1" : "bar" } },
{ "span_term" : { "field1" : "baz" } }
],
"slop" : 5,
"in_order" : true
}
"span_within" : {
"little" : {
"span_term" : { "field1" : "foo" }
},
"big" : {
"span_near" : {
"clauses" : [
{ "span_term" : { "field1" : "bar" } },
{ "span_term" : { "field1" : "baz" } }
],
"slop" : 5,
"in_order" : true
}
}
}
}
--------------------------------------------------
// CONSOLE
The `big` and `little` clauses can be any span type query. Matching
spans from `little` that are enclosed within `big` are returned.
View File
@ -19,8 +19,8 @@ GET /_search
}
}
}
------------------------------------------
// CONSOLE
The above request is translated into:
@ -34,8 +34,8 @@ GET /_search
}
}
}
------------------------------------------
// CONSOLE
Alternatively passing the template as an escaped string works as well:
@ -53,8 +53,6 @@ GET /_search
}
}
------------------------------------------
// CONSOLE
<1> New line characters (`\n`) should be escaped as `\\n` or removed,
and quotes (`"`) should be escaped as `\\"`.
@ -79,8 +77,6 @@ GET /_search
}
}
------------------------------------------
// CONSOLE
<1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`.
Alternatively, you can register a query template in the cluster state with:
@ -89,10 +85,9 @@ Alternatively, you can register a query template in the cluster state with:
------------------------------------------
PUT /_search/template/my_template
{
"template": { "match": { "text": "{{query_string}}" }}
"template": { "match": { "text": "{{query_string}}" }},
}
------------------------------------------
// CONSOLE
and refer to it in the `template` query with the `id` parameter:
@ -111,13 +106,9 @@ GET /_search
}
}
------------------------------------------
// CONSOLE
// TEST[continued]
<1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`.
There is also a dedicated `template` endpoint that allows you to template an entire search request.
Please see <<search-template>> for more details.
View File
@ -6,18 +6,14 @@ Filters documents that have fields that match any of the provided terms
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"constant_score" : {
"filter" : {
"terms" : { "user" : ["kimchy", "elasticsearch"]}
}
"constant_score" : {
"filter" : {
"terms" : { "user" : ["kimchy", "elasticsearch"]}
}
}
}
--------------------------------------------------
// CONSOLE
The `terms` query is also aliased with `in` as the filter name for
simpler usage deprecated[5.0.0,use `terms` instead].
@ -67,37 +63,33 @@ possible, reducing the need for networking.
[float]
===== Terms lookup twitter example
First we index the information for the user with id 2, specifically its
followers, then we index a tweet from the user with id 1. Finally we search for
all the tweets that match the followers of user 2.
[source,js]
--------------------------------------------------
PUT /users/user/2
{
"followers" : ["1", "3"]
}
# index the information for user with id 2, specifically, its followers
curl -XPUT localhost:9200/users/user/2 -d '{
"followers" : ["1", "3"]
}'
PUT /tweets/tweet/1
{
"user" : "1"
}
# index a tweet, from user with id 1
curl -XPUT localhost:9200/tweets/tweet/1 -d '{
"user" : "1"
}'
GET /tweets/_search
{
"query" : {
"terms" : {
"user" : {
"index" : "users",
"type" : "user",
"id" : "2",
"path" : "followers"
}
}
# search on all the tweets that match the followers of user 2
curl -XGET localhost:9200/tweets/_search -d '{
"query" : {
"terms" : {
"user" : {
"index" : "users",
"type" : "user",
"id" : "2",
"path" : "followers"
}
}
}
}
}'
--------------------------------------------------
// CONSOLE
The structure of the external terms document can also include an array of
inner objects, for example:
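A hypothetical sketch of such a document; the lookup `path` would then use dot notation, e.g. `followers.id`:
[source,js]
--------------------------------------------------
PUT /users/user/2
{
    "followers" : [
        { "id" : "1" },
        { "id" : "3" }
    ]
}
--------------------------------------------------
// CONSOLE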
View File
@ -5,13 +5,9 @@ Filters documents matching the provided document / mapping type.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"type" : {
"value" : "my_type"
}
"type" : {
"value" : "my_type"
}
}
--------------------------------------------------
// CONSOLE
View File
@ -11,40 +11,28 @@ query maps to Lucene `WildcardQuery`.
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"wildcard" : { "user" : "ki*y" }
}
"wildcard" : { "user" : "ki*y" }
}
--------------------------------------------------
// CONSOLE
A boost can also be associated with the query:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"wildcard" : { "user" : { "value" : "ki*y", "boost" : 2.0 } }
}
"wildcard" : { "user" : { "value" : "ki*y", "boost" : 2.0 } }
}
--------------------------------------------------
// CONSOLE
Or:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"wildcard" : { "user" : { "wildcard" : "ki*y", "boost" : 2.0 } }
}
"wildcard" : { "user" : { "wildcard" : "ki*y", "boost" : 2.0 } }
}
--------------------------------------------------
// CONSOLE
This multi term query allows you to control how it gets rewritten using the
<<query-dsl-multi-term-rewrite,rewrite>>
View File
@ -65,8 +65,8 @@ This will yield the following result:
{
"query": [
{
"query_type": "BooleanQuery",
"lucene": "message:search message:test",
"type": "BooleanQuery",
"description": "message:search message:test",
"time": "15.52889800ms",
"breakdown": {
"score": 0,
@ -78,8 +78,8 @@ This will yield the following result:
},
"children": [
{
"query_type": "TermQuery",
"lucene": "message:search",
"type": "TermQuery",
"description": "message:search",
"time": "4.938855000ms",
"breakdown": {
"score": 0,
@ -91,8 +91,8 @@ This will yield the following result:
}
},
{
"query_type": "TermQuery",
"lucene": "message:test",
"type": "TermQuery",
"description": "message:test",
"time": "0.5016660000ms",
"breakdown": {
"score": 0,
@ -180,20 +180,20 @@ The overall structure of this query tree will resemble your original Elasticsear
--------------------------------------------------
"query": [
{
"query_type": "BooleanQuery",
"lucene": "message:search message:test",
"type": "BooleanQuery",
"description": "message:search message:test",
"time": "15.52889800ms",
"breakdown": {...}, <1>
"children": [
{
"query_type": "TermQuery",
"lucene": "message:search",
"type": "TermQuery",
"description": "message:search",
"time": "4.938855000ms",
"breakdown": {...}
},
{
"query_type": "TermQuery",
"lucene": "message:test",
"type": "TermQuery",
"description": "message:test",
"time": "0.5016660000ms",
"breakdown": {...}
}
@ -204,8 +204,8 @@ The overall structure of this query tree will resemble your original Elasticsear
<1> The breakdown timings are omitted for simplicity
Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two
clauses (both holding a TermQuery). The `"query_type"` field displays the Lucene class name, and often aligns with
the equivalent name in Elasticsearch. The `"lucene"` field displays the Lucene explanation text for the query, and
clauses (both holding a TermQuery). The `"type"` field displays the Lucene class name, and often aligns with
the equivalent name in Elasticsearch. The `"description"` field displays the Lucene explanation text for the query, and
is made available to help differentiate between parts of your query (e.g. both `"message:search"` and `"message:test"`
are TermQueries and would appear identical otherwise).
@ -214,7 +214,7 @@ of all children.
The `"breakdown"` field will give detailed stats about how the time was spent, we'll look at
that in a moment. Finally, the `"children"` array lists any sub-queries that may be present. Because we searched for two
values ("search test"), our BooleanQuery holds two children TermQueries. They have identical information (query_type, time,
values ("search test"), our BooleanQuery holds two children TermQueries. They have identical information (type, time,
breakdown, etc). Children are allowed to have their own children.
==== Timing Breakdown
@ -465,8 +465,8 @@ And the response:
{
"query": [
{
"query_type": "TermQuery",
"lucene": "my_field:foo",
"type": "TermQuery",
"description": "my_field:foo",
"time": "0.4094560000ms",
"breakdown": {
"score": 0,
@ -478,8 +478,8 @@ And the response:
}
},
{
"query_type": "TermQuery",
"lucene": "message:search",
"type": "TermQuery",
"description": "message:search",
"time": "0.3037020000ms",
"breakdown": {
"score": 0,
@ -522,8 +522,8 @@ And the response:
{
"query": [
{
"query_type": "MatchAllDocsQuery",
"lucene": "*:*",
"type": "MatchAllDocsQuery",
"description": "*:*",
"time": "0.04829300000ms",
"breakdown": {
"score": 0,
View File
@ -22,6 +22,7 @@
- match: { nodes.$master.ingest.processors.10.type: remove }
- match: { nodes.$master.ingest.processors.11.type: rename }
- match: { nodes.$master.ingest.processors.12.type: set }
- match: { nodes.$master.ingest.processors.13.type: split }
- match: { nodes.$master.ingest.processors.14.type: trim }
- match: { nodes.$master.ingest.processors.15.type: uppercase }
- match: { nodes.$master.ingest.processors.13.type: sort }
- match: { nodes.$master.ingest.processors.14.type: split }
- match: { nodes.$master.ingest.processors.15.type: trim }
- match: { nodes.$master.ingest.processors.16.type: uppercase }
Some files were not shown because too many files have changed in this diff.