Merge branch 'master' into cleanup/transport_bulk

Areek Zillur 2016-10-05 17:51:39 -04:00
commit 9b691f0d93
34 changed files with 433 additions and 257 deletions

Vagrantfile (vendored): 28 changed lines
View File

@ -156,6 +156,7 @@ def dnf_common(config)
update_command: "dnf check-update",
update_tracking_file: "/var/cache/dnf/last_update",
install_command: "dnf install -y",
install_command_retries: 5,
java_package: "java-1.8.0-openjdk-devel")
if Vagrant.has_plugin?("vagrant-cachier")
# Autodetect doesn't work....
@ -205,6 +206,7 @@ def provision(config,
update_command: 'required',
update_tracking_file: 'required',
install_command: 'required',
install_command_retries: 0,
java_package: 'required',
extra: '')
# Vagrant runs Ruby 2.0.0, which doesn't support required named parameters....
@ -215,9 +217,27 @@ def provision(config,
config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL
set -e
set -o pipefail
# Retry the install command for $1 up to $2 times if it fails
retry_installcommand() {
n=0
while true; do
#{install_command} $1 && break
let n=n+1
if [ $n -ge $2 ]; then
echo "==> Exhausted retries to install $1"
return 1
fi
echo "==> Retrying installing $1, attempt $((n+1))"
# Add a small delay to increase chance of metalink providing updated list of mirrors
sleep 5
done
}
installed() {
command -v $1 2>&1 >/dev/null
}
install() {
# Only update the package list if we haven't done so in the last day
if [ ! -f #{update_tracking_file} ] || [ "x$(find #{update_tracking_file} -mtime +0)" == "x#{update_tracking_file}" ]; then
@ -226,8 +246,14 @@ def provision(config,
touch #{update_tracking_file}
fi
echo "==> Installing $1"
#{install_command} $1
if [ #{install_command_retries} -eq 0 ]
then
#{install_command} $1
else
retry_installcommand $1 #{install_command_retries}
fi
}
ensure() {
installed $1 || install $1
}
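
The retry helper above is POSIX shell; as a rough sketch of the same bounded-retry pattern in Java (hypothetical helper, not part of this commit), with a delay between attempts mirroring the script's sleep 5:

import java.util.concurrent.Callable;

final class Retry {
    // Run the task up to maxAttempts times, sleeping delayMillis between failed
    // attempts like retry_installcommand above; rethrow the last failure when
    // retries are exhausted.
    static <T> T withRetries(Callable<T> task, int maxAttempts, long delayMillis) throws Exception {
        if (maxAttempts < 1) {
            throw new IllegalArgumentException("maxAttempts must be >= 1");
        }
        Exception last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return task.call();
            } catch (Exception e) {
                last = e;
                if (attempt < maxAttempts) {
                    Thread.sleep(delayMillis); // small delay before the next attempt
                }
            }
        }
        throw last;
    }
}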

View File

@ -231,6 +231,7 @@ public class PluginBuildPlugin extends BuildPlugin {
* ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to
* maven local work, since we publish to maven central externally. */
zipReal(MavenPublication) {
artifactId = project.pluginProperties.extension.name
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)

View File

@ -45,7 +45,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
setRefreshPolicy(refreshPolicy);
}
BulkItemRequest[] items() {
public BulkItemRequest[] items() {
return items;
}

View File

@ -73,9 +73,9 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING =
Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope);
Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, 0.0f, Property.Dynamic, Property.NodeScope);
public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING =
Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope);
Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, 0.0f, Property.Dynamic, Property.NodeScope);
public static final Setting<Float> THRESHOLD_SETTING =
Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f,
Property.Dynamic, Property.NodeScope);
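
Both balance factors now declare an explicit 0.0f lower bound, matching THRESHOLD_SETTING above; Setting.floatSetting rejects smaller values when the setting is parsed, so a bad dynamic update fails validation instead of corrupting the weight function. A minimal sketch of such a bounded dynamic setting, with a hypothetical key:

// hypothetical setting key; updating it to a negative value now fails validation
public static final Setting<Float> EXAMPLE_BALANCE_SETTING =
    Setting.floatSetting("cluster.routing.allocation.balance.example", 0.5f, 0.0f,
        Property.Dynamic, Property.NodeScope);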
@ -210,7 +210,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
*/
public static class Balancer {
private final Logger logger;
private final Map<String, ModelNode> nodes = new HashMap<>();
private final Map<String, ModelNode> nodes;
private final RoutingAllocation allocation;
private final RoutingNodes routingNodes;
private final WeightFunction weight;
@ -218,6 +218,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
private final float threshold;
private final MetaData metaData;
private final float avgShardsPerNode;
private final NodeSorter sorter;
public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
this.logger = logger;
@ -227,7 +228,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
this.routingNodes = allocation.routingNodes();
this.metaData = allocation.metaData();
avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size();
buildModelFromAssigned();
nodes = Collections.unmodifiableMap(buildModelFromAssigned());
sorter = newNodeSorter();
}
/**
@ -304,11 +306,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
public Map<DiscoveryNode, Float> weighShard(ShardRouting shard) {
final NodeSorter sorter = newNodeSorter();
final ModelNode[] modelNodes = sorter.modelNodes;
final float[] weights = sorter.weights;
buildWeightOrderedIndices(sorter);
buildWeightOrderedIndices();
Map<DiscoveryNode, Float> nodes = new HashMap<>(modelNodes.length);
float currentNodeWeight = 0.0f;
for (int i = 0; i < modelNodes.length; i++) {
@ -332,20 +333,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* weight of the maximum node and the minimum node according to the
* {@link WeightFunction}. This weight is calculated per index to
* distribute shards evenly per index. The balancer tries to relocate
* shards only if the delta exceeds the threshold. If the default case
* shards only if the delta exceeds the threshold. In the default case
* the threshold is set to <tt>1.0</tt> to enforce gaining relocation
* only, or in other words relocations that move the weight delta closer
* to <tt>0.0</tt>
*/
private void balanceByWeights() {
final NodeSorter sorter = newNodeSorter();
final AllocationDeciders deciders = allocation.deciders();
final ModelNode[] modelNodes = sorter.modelNodes;
final float[] weights = sorter.weights;
for (String index : buildWeightOrderedIndices(sorter)) {
for (String index : buildWeightOrderedIndices()) {
IndexMetaData indexMetaData = metaData.index(index);
// find nodes that have a shard of this index or where shards of this index are allowed to stay
// find nodes that have a shard of this index or where shards of this index are allowed to be allocated to,
// move these nodes to the front of modelNodes so that we can only balance based on these nodes
int relevantNodes = 0;
for (int i = 0; i < modelNodes.length; i++) {
@ -440,14 +440,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* allocations on added nodes from one index when the weight parameters
* for global balance overrule the index balance at an intermediate
* state. For example this can happen if we have 3 nodes and 3 indices
* with 3 shards and 1 shard. At the first stage all three nodes hold
* 2 shard for each index. now we add another node and the first index
* is balanced moving 3 two of the nodes over to the new node since it
* with 3 primary and 1 replica shards. At the first stage all three nodes hold
* 2 shards for each index. Now we add another node and the first index
* is balanced moving three shards from two of the nodes over to the new node since it
* has no shards yet and global balance for the node is way below
* average. To re-balance we need to move shards back eventually likely
* to the nodes we relocated them from.
*/
private String[] buildWeightOrderedIndices(NodeSorter sorter) {
private String[] buildWeightOrderedIndices() {
final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class);
final float[] deltas = new float[indices.length];
for (int i = 0; i < deltas.length; i++) {
@ -501,7 +501,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
// Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
// shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
// offloading the shards.
final NodeSorter sorter = newNodeSorter();
for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) {
ShardRouting shardRouting = it.next();
// we can only move started shards...
@ -511,7 +510,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
RoutingNode routingNode = sourceNode.getRoutingNode();
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
moveShard(sorter, shardRouting, sourceNode, routingNode);
moveShard(shardRouting, sourceNode, routingNode);
}
}
}
@ -520,7 +519,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
/**
* Move started shard to the minimal eligible node with respect to the weight function
*/
private void moveShard(NodeSorter sorter, ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) {
private void moveShard(ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
sorter.reset(shardRouting.getIndexName());
/*
@ -557,7 +556,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* on the target node which we respect during the allocation / balancing
* process. In short, this method recreates the status-quo in the cluster.
*/
private void buildModelFromAssigned() {
private Map<String, ModelNode> buildModelFromAssigned() {
Map<String, ModelNode> nodes = new HashMap<>();
for (RoutingNode rn : routingNodes) {
ModelNode node = new ModelNode(rn);
nodes.put(rn.nodeId(), node);
@ -572,6 +572,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
}
}
return nodes;
}
/**
@ -626,91 +627,37 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
do {
for (int i = 0; i < primaryLength; i++) {
ShardRouting shard = primary[i];
if (!shard.primary()) {
final Decision decision = deciders.canAllocate(shard, allocation);
if (decision.type() == Type.NO) {
UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decision);
unassigned.ignoreShard(shard, allocationStatus, allocation.changes());
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes());
}
continue;
} else {
Tuple<Decision, ModelNode> allocationDecision = allocateUnassignedShard(shard, throttledNodes);
final Decision decision = allocationDecision.v1();
final ModelNode minNode = allocationDecision.v2();
if (decision.type() == Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
}
final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation,
ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes());
minNode.addShard(shard);
if (!shard.primary()) {
// copy over the same replica shards to the secondary array so they will get allocated
// in a subsequent iteration, allowing replicas of other shards to be allocated first
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
secondary[secondaryLength++] = primary[++i];
}
}
}
assert !shard.assignedToNode() : shard;
/* find a node with minimal weight we can allocate on */
float minWeight = Float.POSITIVE_INFINITY;
ModelNode minNode = null;
Decision decision = null;
if (throttledNodes.size() < nodes.size()) {
/* Don't iterate over an identity hashset here; the
* iteration order is different for each run and makes testing hard */
for (ModelNode node : nodes.values()) {
if (throttledNodes.contains(node)) {
continue;
}
if (!node.containsShard(shard)) {
// simulate weight if we would add shard to node
float currentWeight = weight.weightShardAdded(this, node, shard.getIndexName());
/*
* Only check the deciders if the move provides a gain, i.e. the
* simulated weight is no worse than the current minimum
*/
if (currentWeight <= minWeight) {
Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(), allocation);
NOUPDATE:
if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
if (currentWeight == minWeight) {
/* we have an equal weight tie breaking:
* 1. if one decision is YES prefer it
* 2. prefer the node that holds the primary for this index with the next id in the ring, i.e.
* for the 3-shard, 2-replica case we try to build up:
* 1 2 0
* 2 0 1
* 0 1 2
* such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater
* than the id of the shard we need to assign. This works fine when new indices are created since
* primaries are added first and we only add one shard set at a time in this algorithm.
*/
if (currentDecision.type() == decision.type()) {
final int repId = shard.id();
final int nodeHigh = node.highestPrimary(shard.index().getName());
final int minNodeHigh = minNode.highestPrimary(shard.getIndexName());
if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh))
|| (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) {
// nothing to set here; the minNode, minWeight, and decision get set below
} else {
break NOUPDATE;
}
} else if (currentDecision.type() != Type.YES) {
break NOUPDATE;
}
}
minNode = node;
minWeight = currentWeight;
decision = currentDecision;
}
}
}
} else {
// did *not* receive a YES decision
if (logger.isTraceEnabled()) {
logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decision.type());
}
}
assert (decision == null) == (minNode == null);
if (minNode != null) {
final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation,
ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
if (decision.type() == Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
}
shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes());
minNode.addShard(shard);
continue; // don't add to ignoreUnassigned
} else {
if (minNode != null) {
// throttle decision scenario
assert decision.type() == Type.THROTTLE;
final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation,
ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize));
final RoutingNode node = minNode.getRoutingNode();
final Decision.Type nodeLevelDecision = deciders.canAllocate(node, allocation).type();
@ -721,21 +668,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
assert nodeLevelDecision == Type.NO;
throttledNodes.add(minNode);
}
} else {
assert decision.type() == Type.NO;
if (logger.isTraceEnabled()) {
logger.trace("No Node found to assign shard [{}]", shard);
}
}
if (logger.isTraceEnabled()) {
logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decision.type());
}
} else if (logger.isTraceEnabled()) {
logger.trace("No Node found to assign shard [{}]", shard);
}
assert decision == null || decision.type() == Type.THROTTLE;
UnassignedInfo.AllocationStatus allocationStatus =
decision == null ? UnassignedInfo.AllocationStatus.DECIDERS_NO :
UnassignedInfo.AllocationStatus.fromDecision(decision);
unassigned.ignoreShard(shard, allocationStatus, allocation.changes());
if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) {
unassigned.ignoreShard(secondary[--secondaryLength], allocationStatus, allocation.changes());
UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decision);
unassigned.ignoreShard(shard, allocationStatus, allocation.changes());
if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes());
}
}
}
}
@ -748,6 +693,84 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
// clear everything; we have either added it or moved it to ignoreUnassigned
}
/**
* Make a decision for allocating an unassigned shard. This method returns two values in a tuple: the
* first value is the {@link Decision} taken to allocate the unassigned shard, the second value is the
* {@link ModelNode} representing the node that the shard should be assigned to. If the decision returned
* is of type {@link Type#NO}, then the assigned node will be null.
*/
private Tuple<Decision, ModelNode> allocateUnassignedShard(final ShardRouting shard, final Set<ModelNode> throttledNodes) {
assert !shard.assignedToNode() : "not an unassigned shard: " + shard;
if (allocation.deciders().canAllocate(shard, allocation).type() == Type.NO) {
// NO decision for allocating the shard, irrespective of any particular node, so exit early
return Tuple.tuple(Decision.NO, null);
}
/* find a node with minimal weight we can allocate on */
float minWeight = Float.POSITIVE_INFINITY;
ModelNode minNode = null;
Decision decision = null;
if (throttledNodes.size() < nodes.size()) {
/* Don't iterate over an identity hashset here; the
* iteration order is different for each run and makes testing hard */
for (ModelNode node : nodes.values()) {
if (throttledNodes.contains(node)) {
continue;
}
if (!node.containsShard(shard)) {
// simulate weight if we would add shard to node
float currentWeight = weight.weightShardAdded(this, node, shard.getIndexName());
/*
* Only check the deciders if the move provides a gain, i.e. the
* simulated weight is no worse than the current minimum
*/
if (currentWeight <= minWeight) {
Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation);
if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
final boolean updateMinNode;
if (currentWeight == minWeight) {
/* we have an equal weight tie breaking:
* 1. if one decision is YES prefer it
* 2. prefer the node that holds the primary for this index with the next id in the ring, i.e.
* for the 3-shard, 2-replica case we try to build up:
* 1 2 0
* 2 0 1
* 0 1 2
* such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater
* than the id of the shard we need to assign. This works fine when new indices are created since
* primaries are added first and we only add one shard set at a time in this algorithm.
*/
if (currentDecision.type() == decision.type()) {
final int repId = shard.id();
final int nodeHigh = node.highestPrimary(shard.index().getName());
final int minNodeHigh = minNode.highestPrimary(shard.getIndexName());
updateMinNode = ((((nodeHigh > repId && minNodeHigh > repId)
|| (nodeHigh < repId && minNodeHigh < repId))
&& (nodeHigh < minNodeHigh))
|| (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId));
} else {
updateMinNode = currentDecision.type() == Type.YES;
}
} else {
updateMinNode = true;
}
if (updateMinNode) {
minNode = node;
minWeight = currentWeight;
decision = currentDecision;
}
}
}
}
}
}
if (decision == null) {
// decision was not set and a node was not assigned, so treat it as a NO decision
decision = Decision.NO;
}
return Tuple.tuple(decision, minNode);
}
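
Read in isolation, the tie-break above is a pure predicate over three shard ids; a standalone sketch (method name hypothetical) with the worked case from the comment:

/** Should node (with highest primary nodeHigh) replace minNode (minNodeHigh) on a weight tie? */
static boolean preferNodeOnTie(int repId, int nodeHigh, int minNodeHigh) {
    // prefer the candidate whose highest primary id is the smallest id on the same
    // side of repId, or the one that wrapped past repId when the sides differ
    return (((nodeHigh > repId && minNodeHigh > repId)
            || (nodeHigh < repId && minNodeHigh < repId))
            && nodeHigh < minNodeHigh)
        || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId);
}
// e.g. assigning replica id 1 where the candidates' highest primaries are 2 and 0:
// preferNodeOnTie(1, 2, 0) == true, so the node holding primary 2 is preferred,
// matching "the minimal id greater than the id of the shard we need to assign".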
/**
* Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the
* balance model. Iff this method returns <code>true</code>, the relocation has already been executed on the

View File

@ -19,8 +19,11 @@
package org.elasticsearch.rest;
import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -28,9 +31,14 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.ActionPlugin;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Collectors;
/**
@ -58,12 +66,48 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH
final RestChannelConsumer action = prepareRequest(request, client);
// validate unconsumed params, but we must exclude params used to format the response
final List<String> unconsumedParams =
request.unconsumedParams().stream().filter(p -> !responseParams().contains(p)).collect(Collectors.toList());
// use a sorted set so the unconsumed parameters appear in a reliable sorted order
final SortedSet<String> unconsumedParams =
request.unconsumedParams().stream().filter(p -> !responseParams().contains(p)).collect(Collectors.toCollection(TreeSet::new));
// validate the non-response params
if (!unconsumedParams.isEmpty()) {
throw new IllegalArgumentException("request [" + request.path() + "] contains unused params: " + unconsumedParams.toString());
if (unconsumedParams.isEmpty() == false) {
String message = String.format(
Locale.ROOT,
"request [%s] contains unrecognized parameter%s: ",
request.path(),
unconsumedParams.size() > 1 ? "s" : "");
boolean first = true;
for (final String unconsumedParam : unconsumedParams) {
final LevensteinDistance ld = new LevensteinDistance();
final List<Tuple<Float, String>> scoredParams = new ArrayList<>();
final Set<String> candidateParams = new HashSet<>();
candidateParams.addAll(request.consumedParams());
candidateParams.addAll(responseParams());
for (final String candidateParam : candidateParams) {
final float distance = ld.getDistance(unconsumedParam, candidateParam);
if (distance > 0.5f) {
scoredParams.add(new Tuple<>(distance, candidateParam));
}
}
CollectionUtil.timSort(scoredParams, (a, b) -> {
// sort by distance in reverse order, then parameter name for equal distances
int compare = a.v1().compareTo(b.v1());
if (compare != 0) return -compare;
else return a.v2().compareTo(b.v2());
});
if (first == false) {
message += ", ";
}
message += "[" + unconsumedParam + "]";
final List<String> keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList());
if (keys.isEmpty() == false) {
message += " -> did you mean " + (keys.size() == 1 ? "[" + keys.get(0) + "]": "any of " + keys.toString()) + "?";
}
first = false;
}
throw new IllegalArgumentException(message);
}
// execute the action
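
Despite its name, Lucene's LevensteinDistance.getDistance returns a similarity score in [0, 1] where 1 means identical, which is why the code above keeps candidates scoring over 0.5 and sorts them in descending order. The same scoring as a self-contained sketch (class and method names hypothetical):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.collect.Tuple;

final class ParamSuggester {
    static List<String> suggest(String unknown, Collection<String> candidates) {
        final LevensteinDistance ld = new LevensteinDistance();
        final List<Tuple<Float, String>> scored = new ArrayList<>();
        for (String candidate : candidates) {
            final float similarity = ld.getDistance(unknown, candidate);
            if (similarity > 0.5f) { // drop names that are not plausibly typos
                scored.add(new Tuple<>(similarity, candidate));
            }
        }
        // most similar first; ties broken alphabetically for deterministic output
        CollectionUtil.timSort(scored, (a, b) -> {
            final int cmp = a.v1().compareTo(b.v1());
            return cmp != 0 ? -cmp : a.v2().compareTo(b.v2());
        });
        final List<String> result = new ArrayList<>(scored.size());
        for (Tuple<Float, String> t : scored) {
            result.add(t.v2());
        }
        return result;
    }
}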

View File

@ -28,7 +28,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import java.net.SocketAddress;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -129,6 +128,16 @@ public abstract class RestRequest implements ToXContent.Params {
return params;
}
/**
* Returns a list of parameters that have been consumed. This method returns a copy; callers
* are free to modify the returned list.
*
* @return the list of currently consumed parameters.
*/
List<String> consumedParams() {
return consumedParams.stream().collect(Collectors.toList());
}
/**
* Returns a list of parameters that have not yet been consumed. This method returns a copy;
* callers are free to modify the returned list.

View File

@ -73,8 +73,9 @@ public class RestAnalyzeAction extends BaseRestHandler {
analyzeRequest.text(texts);
analyzeRequest.analyzer(request.param("analyzer"));
analyzeRequest.field(request.param("field"));
if (request.hasParam("tokenizer")) {
analyzeRequest.tokenizer(request.param("tokenizer"));
final String tokenizer = request.param("tokenizer");
if (tokenizer != null) {
analyzeRequest.tokenizer(tokenizer);
}
for (String filter : request.paramAsStringArray("filter", Strings.EMPTY_ARRAY)) {
analyzeRequest.addTokenFilter(filter);

View File

@ -40,10 +40,10 @@ public class AbstractRangeAggregatorFactory<AF extends AbstractRangeAggregatorFa
extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, AF> {
private final InternalRange.Factory<?, ?> rangeFactory;
private final List<R> ranges;
private final R[] ranges;
private final boolean keyed;
public AbstractRangeAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, List<R> ranges, boolean keyed,
public AbstractRangeAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, R[] ranges, boolean keyed,
InternalRange.Factory<?, ?> rangeFactory, AggregationContext context, AggregatorFactory<?> parent,
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
super(name, type, config, context, parent, subFactoriesBuilder, metaData);
@ -55,7 +55,7 @@ public class AbstractRangeAggregatorFactory<AF extends AbstractRangeAggregatorFa
@Override
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException {
return new Unmapped<R>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData);
return new Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData);
}
@Override

View File

@ -19,13 +19,17 @@
package org.elasticsearch.search.aggregations.bucket.range;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import java.io.IOException;
import java.util.ArrayList;
@ -55,6 +59,40 @@ public abstract class AbstractRangeBuilder<AB extends AbstractRangeBuilder<AB, R
keyed = in.readBoolean();
}
/**
* Resolve any strings in the ranges so we have a number value for the from
* and to of each range. The ranges are also sorted before being returned.
*/
protected Range[] processRanges(AggregationContext context, ValuesSourceConfig<Numeric> config) {
Range[] ranges = new Range[this.ranges.size()];
for (int i = 0; i < ranges.length; i++) {
ranges[i] = this.ranges.get(i).process(config.format(), context.searchContext());
}
sortRanges(ranges);
return ranges;
}
private static void sortRanges(final Range[] ranges) {
new InPlaceMergeSorter() {
@Override
protected void swap(int i, int j) {
final Range tmp = ranges[i];
ranges[i] = ranges[j];
ranges[j] = tmp;
}
@Override
protected int compare(int i, int j) {
int cmp = Double.compare(ranges[i].from, ranges[j].from);
if (cmp == 0) {
cmp = Double.compare(ranges[i].to, ranges[j].to);
}
return cmp;
}
}.sort(0, ranges.length);
}
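
Lucene's InPlaceMergeSorter sorts the array without allocating scratch space, which suits this small in-place sort; purely for illustration, the same (from, to) ordering via the JDK comparator API (assuming the from and to fields are accessible doubles) would read:

import java.util.Arrays;
import java.util.Comparator;

// allocating equivalent of sortRanges above; Arrays.sort needs TimSort scratch space
Arrays.sort(ranges, Comparator.comparingDouble((Range r) -> r.from)
        .thenComparingDouble(r -> r.to));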
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
out.writeVInt(ranges.size());

View File

@ -114,6 +114,8 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder<RangeAggregati
@Override
protected RangeAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
// We need to call processRanges here so they are parsed before we make the decision of whether to cache the request
Range[] ranges = processRanges(context, config);
return new RangeAggregatorFactory(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
metaData);
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.bucket.range;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
@ -210,7 +209,7 @@ public class RangeAggregator extends BucketsAggregator {
final double[] maxTo;
public RangeAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format,
InternalRange.Factory rangeFactory, List<? extends Range> ranges, boolean keyed, AggregationContext aggregationContext,
InternalRange.Factory rangeFactory, Range[] ranges, boolean keyed, AggregationContext aggregationContext,
Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
@ -220,11 +219,7 @@ public class RangeAggregator extends BucketsAggregator {
this.keyed = keyed;
this.rangeFactory = rangeFactory;
this.ranges = new Range[ranges.size()];
for (int i = 0; i < this.ranges.length; i++) {
this.ranges[i] = ranges.get(i).process(format, context.searchContext());
}
sortRanges(this.ranges);
this.ranges = ranges;
maxTo = new double[this.ranges.length];
maxTo[0] = this.ranges[0].to;
@ -337,45 +332,21 @@ public class RangeAggregator extends BucketsAggregator {
return rangeFactory.create(name, buckets, format, keyed, pipelineAggregators(), metaData());
}
private static void sortRanges(final Range[] ranges) {
new InPlaceMergeSorter() {
@Override
protected void swap(int i, int j) {
final Range tmp = ranges[i];
ranges[i] = ranges[j];
ranges[j] = tmp;
}
@Override
protected int compare(int i, int j) {
int cmp = Double.compare(ranges[i].from, ranges[j].from);
if (cmp == 0) {
cmp = Double.compare(ranges[i].to, ranges[j].to);
}
return cmp;
}
}.sort(0, ranges.length);
}
public static class Unmapped<R extends RangeAggregator.Range> extends NonCollectingAggregator {
private final List<R> ranges;
private final R[] ranges;
private final boolean keyed;
private final InternalRange.Factory factory;
private final DocValueFormat format;
@SuppressWarnings("unchecked")
public Unmapped(String name, List<R> ranges, boolean keyed, DocValueFormat format,
public Unmapped(String name, R[] ranges, boolean keyed, DocValueFormat format,
AggregationContext context,
Aggregator parent, InternalRange.Factory factory, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException {
super(name, context, parent, pipelineAggregators, metaData);
this.ranges = new ArrayList<>();
for (R range : ranges) {
this.ranges.add((R) range.process(format, context.searchContext()));
}
this.ranges = ranges;
this.keyed = keyed;
this.format = format;
this.factory = factory;
@ -384,7 +355,7 @@ public class RangeAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildEmptyAggregation() {
InternalAggregations subAggs = buildEmptySubAggregations();
List<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket> buckets = new ArrayList<>(ranges.size());
List<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket> buckets = new ArrayList<>(ranges.length);
for (RangeAggregator.Range range : ranges) {
buckets.add(factory.createBucket(range.key, range.from, range.to, 0, subAggs, keyed, format));
}

View File

@ -29,12 +29,11 @@ import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public class RangeAggregatorFactory extends AbstractRangeAggregatorFactory<RangeAggregatorFactory, RangeAggregator.Range> {
public RangeAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, List<Range> ranges, boolean keyed,
public RangeAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, Range[] ranges, boolean keyed,
Factory<?, ?> rangeFactory, AggregationContext context, AggregatorFactory<?> parent,
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
super(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData);

View File

@ -259,6 +259,9 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder<DateRangeA
@Override
protected DateRangeAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig<Numeric> config,
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
// We need to call processRanges here so they are parsed and we know whether `now` has been used before we make
// the decision of whether to cache the request
Range[] ranges = processRanges(context, config);
return new DateRangeAggregatorFactory(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
metaData);
}

View File

@ -30,12 +30,11 @@ import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public class DateRangeAggregatorFactory extends AbstractRangeAggregatorFactory<DateRangeAggregatorFactory, Range> {
public DateRangeAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, List<Range> ranges, boolean keyed,
public DateRangeAggregatorFactory(String name, Type type, ValuesSourceConfig<Numeric> config, Range[] ranges, boolean keyed,
Factory<?, ?> rangeFactory, AggregationContext context, AggregatorFactory<?> parent,
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
super(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData);

View File

@ -215,6 +215,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde
protected ValuesSourceAggregatorFactory<ValuesSource.GeoPoint, ?> innerBuild(AggregationContext context,
ValuesSourceConfig<ValuesSource.GeoPoint> config, AggregatorFactory<?> parent, Builder subFactoriesBuilder)
throws IOException {
Range[] ranges = this.ranges.toArray(new Range[this.range().size()]);
return new GeoDistanceRangeAggregatorFactory(name, type, config, origin, ranges, unit, distanceType, keyed, context, parent,
subFactoriesBuilder, metaData);
}

View File

@ -51,13 +51,13 @@ public class GeoDistanceRangeAggregatorFactory
private final InternalRange.Factory<InternalGeoDistance.Bucket, InternalGeoDistance> rangeFactory = InternalGeoDistance.FACTORY;
private final GeoPoint origin;
private final List<Range> ranges;
private final Range[] ranges;
private final DistanceUnit unit;
private final GeoDistance distanceType;
private final boolean keyed;
public GeoDistanceRangeAggregatorFactory(String name, Type type, ValuesSourceConfig<ValuesSource.GeoPoint> config, GeoPoint origin,
List<Range> ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, AggregationContext context,
Range[] ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, AggregationContext context,
AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
super(name, type, config, context, parent, subFactoriesBuilder, metaData);
this.origin = origin;
@ -70,7 +70,7 @@ public class GeoDistanceRangeAggregatorFactory
@Override
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException {
return new Unmapped<Range>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData);
return new Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData);
}
@Override

View File

@ -34,6 +34,8 @@ import org.joda.time.chrono.ISOChronology;
import java.util.List;
import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
@ -406,11 +408,33 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(0L));
// If size > 1 and cache flag is set on the request we should cache
final SearchResponse r4 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1)
.setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get();
// If the request has an aggregation containing now we should not cache
final SearchResponse r4 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
.setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26"))
.addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))).get();
assertSearchResponse(r4);
assertThat(r4.getHits().getTotalHits(), equalTo(7L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(0L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(0L));
// If the request has an aggregation containing now we should not cache
final SearchResponse r5 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0)
.setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26"))
.addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")).get();
assertSearchResponse(r5);
assertThat(r5.getHits().getTotalHits(), equalTo(7L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(0L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),
equalTo(0L));
// If size > 1 and cache flag is set on the request we should cache
final SearchResponse r6 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1)
.setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get();
assertSearchResponse(r6);
assertThat(r6.getHits().getTotalHits(), equalTo(7L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
equalTo(0L));
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),

View File

@ -39,7 +39,7 @@ import static org.mockito.Mockito.mock;
public class BaseRestHandlerTests extends ESTestCase {
public void testUnconsumedParameters() throws Exception {
public void testOneUnconsumedParameters() throws Exception {
final AtomicBoolean executed = new AtomicBoolean();
BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) {
@Override
@ -56,7 +56,71 @@ public class BaseRestHandlerTests extends ESTestCase {
RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
final IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class)));
assertThat(e, hasToString(containsString("request [/] contains unused params: [unconsumed]")));
assertThat(e, hasToString(containsString("request [/] contains unrecognized parameter: [unconsumed]")));
assertFalse(executed.get());
}
public void testMultipleUnconsumedParameters() throws Exception {
final AtomicBoolean executed = new AtomicBoolean();
BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) {
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
request.param("consumed");
return channel -> executed.set(true);
}
};
final HashMap<String, String> params = new HashMap<>();
params.put("consumed", randomAsciiOfLength(8));
params.put("unconsumed-first", randomAsciiOfLength(8));
params.put("unconsumed-second", randomAsciiOfLength(8));
RestRequest request = new FakeRestRequest.Builder().withParams(params).build();
RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
final IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class)));
assertThat(e, hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-first], [unconsumed-second]")));
assertFalse(executed.get());
}
public void testUnconsumedParametersDidYouMean() throws Exception {
final AtomicBoolean executed = new AtomicBoolean();
BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) {
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
request.param("consumed");
request.param("field");
request.param("tokenizer");
request.param("very_close_to_parameter_1");
request.param("very_close_to_parameter_2");
return channel -> executed.set(true);
}
@Override
protected Set<String> responseParams() {
return Collections.singleton("response_param");
}
};
final HashMap<String, String> params = new HashMap<>();
params.put("consumed", randomAsciiOfLength(8));
params.put("flied", randomAsciiOfLength(8));
params.put("respones_param", randomAsciiOfLength(8));
params.put("tokenzier", randomAsciiOfLength(8));
params.put("very_close_to_parametre", randomAsciiOfLength(8));
params.put("very_far_from_every_consumed_parameter", randomAsciiOfLength(8));
RestRequest request = new FakeRestRequest.Builder().withParams(params).build();
RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
final IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class)));
assertThat(
e,
hasToString(containsString(
"request [/] contains unrecognized parameters: " +
"[flied] -> did you mean [field]?, " +
"[respones_param] -> did you mean [response_param]?, " +
"[tokenzier] -> did you mean [tokenizer]?, " +
"[very_close_to_parametre] -> did you mean any of [very_close_to_parameter_1, very_close_to_parameter_2]?, " +
"[very_far_from_every_consumed_parameter]")));
assertFalse(executed.get());
}

View File

@ -171,7 +171,6 @@ buildRestTests.expectedUnconvertedCandidates = [
'reference/search/request/highlighting.asciidoc',
'reference/search/request/inner-hits.asciidoc',
'reference/search/request/rescore.asciidoc',
'reference/search/request/scroll.asciidoc',
'reference/search/search-template.asciidoc',
'reference/search/suggesters/completion-suggest.asciidoc',
]

View File

@ -175,7 +175,7 @@ this into a tree_levels setting of 26.
===== Performance considerations
Elasticsearch uses the paths in the prefix tree as terms in the index
and in queries. The higher the levels is (and thus the precision), the
and in queries. The higher the level is (and thus the precision), the
more terms are generated. Of course, calculating the terms, keeping them in
memory, and storing them on disk all have a price. Especially with higher
tree levels, indices can become extremely large even with a modest

View File

@ -92,7 +92,7 @@ Default is `memory`.
==== Accepted Formats
In much the same way the geo_point type can accept different
representation of the geo point, the filter can accept it as well:
representations of the geo point, the filter can accept it as well:
[float]
===== Lat Lon As Properties

View File

@ -69,7 +69,7 @@ GET /my_locations/location/_search
==== Accepted Formats
In much the same way the `geo_point` type can accept different
representation of the geo point, the filter can accept it as well:
representations of the geo point, the filter can accept it as well:
[float]
===== Lat Lon As Properties

View File

@ -7,7 +7,7 @@ which are designed to scale horizontally.
<<query-dsl-nested-query,`nested` query>>::
Documents may contains fields of type <<nested,`nested`>>. These
Documents may contain fields of type <<nested,`nested`>>. These
fields are used to index arrays of objects, where each object can be queried
(with the `nested` query) as an independent document.

View File

@ -38,7 +38,7 @@ should keep the ``search context'' alive (see <<scroll-search-context>>), eg `?s
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
POST /twitter/tweet/_search?scroll=1m
{
"query": {
"match" : {
@ -46,8 +46,9 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d '
}
}
}
'
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
The result from the above request includes a `_scroll_id`, which should
be passed to the `scroll` API in order to retrieve the next batch of
@ -55,13 +56,14 @@ results.
[source,js]
--------------------------------------------------
curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d'
POST <1> /_search/scroll <2>
{
"scroll" : "1m", <3>
"scroll_id" : "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1" <4>
"scroll_id" : "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==" <4>
}
'
--------------------------------------------------
// CONSOLE
// TEST[continued s/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==/$body._scroll_id/]
<1> `GET` or `POST` can be used.
<2> The URL should not include the `index` or `type` name -- these
@ -73,14 +75,6 @@ curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d'
Each call to the `scroll` API returns the next batch of results until there
are no more results left to return, i.e. the `hits` array is empty.
For backwards compatibility, `scroll_id` and `scroll` can be passed in the query string.
And the `scroll_id` can be passed in the request body
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_search/scroll?scroll=1m' -d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1'
--------------------------------------------------
IMPORTANT: The initial search request and each subsequent scroll request
returns a new `_scroll_id` -- only the most recent `_scroll_id` should be
used.
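
In Java client code the same loop looks like the sketch below, written against the 5.x client API; the index name, query, and page size are placeholders. Each response carries the `_scroll_id` to feed into the next scroll call:

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;

void scrollAll(Client client) {
    SearchResponse response = client.prepareSearch("twitter") // placeholder index
            .setScroll(TimeValue.timeValueMinutes(1))
            .setQuery(QueryBuilders.matchQuery("user", "kimchy")) // placeholder query
            .setSize(100)
            .get();
    while (response.getHits().getHits().length > 0) {
        // process the current page of hits here ...
        // then request the next page with the most recent scroll id
        response = client.prepareSearchScroll(response.getScrollId())
                .setScroll(TimeValue.timeValueMinutes(1))
                .get();
    }
    // free the server-side search context once iteration is done
    client.prepareClearScroll().addScrollId(response.getScrollId()).get();
}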
@ -94,14 +88,15 @@ order, this is the most efficient option:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_search?scroll=1m' -d '
GET /_search?scroll=1m
{
"sort": [
"_doc"
]
}
'
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
[[scroll-search-context]]
==== Keeping the search context alive
@ -130,8 +125,9 @@ You can check how many search contexts are open with the
[source,js]
---------------------------------------
curl -XGET localhost:9200/_nodes/stats/indices/search?pretty
GET /_nodes/stats/indices/search
---------------------------------------
// CONSOLE
==== Clear scroll API
@ -143,37 +139,46 @@ cleared as soon as the scroll is not being used anymore using the
[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll -d '
DELETE /_search/scroll
{
"scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"]
}'
"scroll_id" : ["DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ=="]
}
---------------------------------------
// CONSOLE
// TEST[catch:missing]
Multiple scroll IDs can be passed as an array:
[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll -d '
DELETE /_search/scroll
{
"scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1", "aGVuRmV0Y2g7NTsxOnkxaDZ"]
}'
"scroll_id" : [
"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==",
"DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB"
]
}
---------------------------------------
// CONSOLE
// TEST[catch:missing]
All search contexts can be cleared with the `_all` parameter:
[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll/_all
DELETE /_search/scroll/_all
---------------------------------------
// CONSOLE
The `scroll_id` can also be passed as a query string parameter or in the request body.
Multiple scroll IDs can be passed as comma-separated values:
[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll \
-d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1,aGVuRmV0Y2g7NTsxOnkxaDZ'
DELETE /_search/scroll/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB
---------------------------------------
// CONSOLE
// TEST[catch:missing]
[[sliced-scroll]]
==== Sliced Scroll

View File

@ -82,8 +82,6 @@ class Chronology -> java.time.chrono.Chronology extends Comparable {
ChronoLocalDate date(Era,int,int,int)
ChronoLocalDate date(int,int,int)
ChronoLocalDate dateEpochDay(long)
ChronoLocalDate dateNow()
ChronoLocalDate dateNow(ZoneId)
ChronoLocalDate dateYearDay(Era,int,int)
ChronoLocalDate dateYearDay(int,int)
boolean equals(Object)
@ -171,8 +169,6 @@ class HijrahChronology -> java.time.chrono.HijrahChronology extends AbstractChro
HijrahDate date(int,int,int)
HijrahDate date(Era,int,int,int)
HijrahDate dateEpochDay(long)
HijrahDate dateNow()
HijrahDate dateNow(ZoneId)
HijrahDate dateYearDay(int,int)
HijrahDate dateYearDay(Era,int,int)
HijrahEra eraOf(int)
@ -185,8 +181,6 @@ class HijrahDate -> java.time.chrono.HijrahDate extends ChronoLocalDate,Temporal
HijrahEra getEra()
HijrahDate minus(TemporalAmount)
HijrahDate minus(long,TemporalUnit)
HijrahDate now()
HijrahDate now(ZoneId)
HijrahDate of(int,int,int)
HijrahDate plus(TemporalAmount)
HijrahDate plus(long,TemporalUnit)
@ -201,8 +195,6 @@ class IsoChronology -> java.time.chrono.IsoChronology extends AbstractChronology
LocalDate date(int,int,int)
LocalDate date(Era,int,int,int)
LocalDate dateEpochDay(long)
LocalDate dateNow()
LocalDate dateNow(ZoneId)
LocalDate dateYearDay(int,int)
LocalDate dateYearDay(Era,int,int)
IsoEra eraOf(int)
@ -219,8 +211,6 @@ class JapaneseChronology -> java.time.chrono.JapaneseChronology extends Abstract
JapaneseDate date(int,int,int)
JapaneseDate date(Era,int,int,int)
JapaneseDate dateEpochDay(long)
JapaneseDate dateNow()
JapaneseDate dateNow(ZoneId)
JapaneseDate dateYearDay(int,int)
JapaneseDate dateYearDay(Era,int,int)
JapaneseEra eraOf(int)
@ -228,8 +218,6 @@ class JapaneseChronology -> java.time.chrono.JapaneseChronology extends Abstract
}
class JapaneseDate -> java.time.chrono.JapaneseDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object {
JapaneseDate now()
JapaneseDate now(ZoneId)
JapaneseDate of(int,int,int)
JapaneseDate from(TemporalAccessor)
JapaneseChronology getChronology()
@ -259,8 +247,6 @@ class MinguoChronology -> java.time.chrono.MinguoChronology extends AbstractChro
MinguoDate date(int,int,int)
MinguoDate date(Era,int,int,int)
MinguoDate dateEpochDay(long)
MinguoDate dateNow()
MinguoDate dateNow(ZoneId)
MinguoDate dateYearDay(int,int)
MinguoDate dateYearDay(Era,int,int)
MinguoEra eraOf(int)
@ -268,8 +254,6 @@ class MinguoChronology -> java.time.chrono.MinguoChronology extends AbstractChro
}
class MinguoDate -> java.time.chrono.MinguoDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object {
MinguoDate now()
MinguoDate now(ZoneId)
MinguoDate of(int,int,int)
MinguoDate from(TemporalAccessor)
MinguoChronology getChronology()
@ -288,8 +272,6 @@ class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends
ThaiBuddhistDate date(int,int,int)
ThaiBuddhistDate date(Era,int,int,int)
ThaiBuddhistDate dateEpochDay(long)
ThaiBuddhistDate dateNow()
ThaiBuddhistDate dateNow(ZoneId)
ThaiBuddhistDate dateYearDay(int,int)
ThaiBuddhistDate dateYearDay(Era,int,int)
ThaiBuddhistEra eraOf(int)
@ -297,8 +279,6 @@ class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends
}
class ThaiBuddhistDate -> java.time.chrono.ThaiBuddhistDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object {
ThaiBuddhistDate now()
ThaiBuddhistDate now(ZoneId)
ThaiBuddhistDate of(int,int,int)
ThaiBuddhistDate from(TemporalAccessor)
ThaiBuddhistChronology getChronology()

View File

@ -103,8 +103,6 @@ class Instant -> java.time.Instant extends Comparable,Temporal,TemporalAccessor,
Instant minusMillis(long)
Instant minusNanos(long)
Instant minusSeconds(long)
Instant now()
Instant now(Clock)
Instant ofEpochSecond(long)
Instant ofEpochSecond(long,long)
Instant ofEpochMilli(long)
@ -143,8 +141,6 @@ class LocalDate -> java.time.LocalDate extends ChronoLocalDate,Temporal,Temporal
LocalDate minusMonths(long)
LocalDate minusWeeks(long)
LocalDate minusDays(long)
LocalDate now()
LocalDate now(ZoneId)
LocalDate of(int,int,int)
LocalDate ofYearDay(int,int)
LocalDate ofEpochDay(long)
@ -191,8 +187,6 @@ class LocalDateTime -> java.time.LocalDateTime extends ChronoLocalDateTime,Tempo
LocalDateTime minusSeconds(long)
LocalDateTime minusWeeks(long)
LocalDateTime minusYears(long)
LocalDateTime now()
LocalDateTime now(ZoneId)
LocalDateTime of(LocalDate,LocalTime)
LocalDateTime of(int,int,int,int,int)
LocalDateTime of(int,int,int,int,int,int)
@ -246,8 +240,6 @@ class LocalTime -> java.time.LocalTime extends Temporal,TemporalAccessor,Tempora
LocalTime minusMinutes(long)
LocalTime minusNanos(long)
LocalTime minusSeconds(long)
LocalTime now()
LocalTime now(ZoneId)
LocalTime of(int,int)
LocalTime of(int,int,int)
LocalTime of(int,int,int,int)
@ -283,8 +275,6 @@ class MonthDay -> java.time.MonthDay extends TemporalAccessor,TemporalAdjuster,C
boolean isAfter(MonthDay)
boolean isBefore(MonthDay)
boolean isValidYear(int)
MonthDay now()
MonthDay now(ZoneId)
MonthDay of(int,int)
MonthDay parse(CharSequence)
MonthDay parse(CharSequence,DateTimeFormatter)
@ -325,8 +315,6 @@ class OffsetDateTime -> java.time.OffsetDateTime extends Temporal,TemporalAccess
OffsetDateTime minusMinutes(long)
OffsetDateTime minusSeconds(long)
OffsetDateTime minusNanos(long)
OffsetDateTime now()
OffsetDateTime now(ZoneId)
OffsetDateTime of(LocalDate,LocalTime,ZoneOffset)
OffsetDateTime of(LocalDateTime,ZoneOffset)
OffsetDateTime of(int,int,int,int,int,int,int,ZoneOffset)
@ -380,8 +368,6 @@ class OffsetTime -> java.time.OffsetTime extends Temporal,TemporalAccessor,Tempo
boolean isAfter(OffsetTime)
boolean isBefore(OffsetTime)
boolean isEqual(OffsetTime)
OffsetTime now()
OffsetTime now(ZoneId)
OffsetTime of(LocalTime,ZoneOffset)
OffsetTime of(int,int,int,int,ZoneOffset)
OffsetTime ofInstant(Instant,ZoneId)
@ -460,8 +446,6 @@ class Year -> java.time.Year extends Temporal,TemporalAccessor,TemporalAdjuster,
Year minus(TemporalAmount)
Year minus(long,TemporalUnit)
Year minusYears(long)
Year now()
Year now(ZoneId)
Year of(int)
Year parse(CharSequence)
Year parse(CharSequence,DateTimeFormatter)
@ -491,8 +475,6 @@ class YearMonth -> java.time.YearMonth extends Temporal,TemporalAccessor,Tempora
YearMonth minus(long,TemporalUnit)
YearMonth minusYears(long)
YearMonth minusMonths(long)
YearMonth now()
YearMonth now(ZoneId)
YearMonth of(int,int)
YearMonth parse(CharSequence)
YearMonth parse(CharSequence,DateTimeFormatter)
@ -530,8 +512,6 @@ class ZonedDateTime -> java.time.ZonedDateTime extends ChronoZonedDateTime,Tempo
ZonedDateTime minusMinutes(long)
ZonedDateTime minusSeconds(long)
ZonedDateTime minusNanos(long)
ZonedDateTime now()
ZonedDateTime now(ZoneId)
ZonedDateTime of(LocalDate,LocalTime,ZoneId)
ZonedDateTime of(LocalDateTime,ZoneId)
ZonedDateTime of(int,int,int,int,int,int,int,ZoneId)

View File

@ -23,10 +23,10 @@ esplugin {
}
dependencies {
compile ('com.maxmind.geoip2:geoip2:2.7.0')
compile ('com.maxmind.geoip2:geoip2:2.8.0')
// geoip2 dependencies:
compile('com.fasterxml.jackson.core:jackson-annotations:2.7.1')
compile('com.fasterxml.jackson.core:jackson-databind:2.7.1')
compile('com.fasterxml.jackson.core:jackson-annotations:2.8.2')
compile('com.fasterxml.jackson.core:jackson-databind:2.8.2')
compile('com.maxmind.db:maxmind-db:1.2.1')
testCompile 'org.elasticsearch:geolite2-databases:20160608'
@ -50,14 +50,19 @@ bundlePlugin {
}
thirdPartyAudit.excludes = [
// geoip WebServiceClient needs Google http client, but we're not using WebServiceClient:
'com.google.api.client.http.HttpTransport',
'com.google.api.client.http.GenericUrl',
'com.google.api.client.http.HttpResponse',
'com.google.api.client.http.HttpRequestFactory',
'com.google.api.client.http.HttpRequest',
'com.google.api.client.http.HttpHeaders',
'com.google.api.client.http.HttpResponseException',
'com.google.api.client.http.javanet.NetHttpTransport',
'com.google.api.client.http.javanet.NetHttpTransport',
// geoip WebServiceClient needs apache http client, but we're not using WebServiceClient:
'org.apache.http.HttpEntity',
'org.apache.http.HttpHost',
'org.apache.http.HttpResponse',
'org.apache.http.StatusLine',
'org.apache.http.auth.UsernamePasswordCredentials',
'org.apache.http.client.config.RequestConfig$Builder',
'org.apache.http.client.config.RequestConfig',
'org.apache.http.client.methods.CloseableHttpResponse',
'org.apache.http.client.methods.HttpGet',
'org.apache.http.client.utils.URIBuilder',
'org.apache.http.impl.auth.BasicScheme',
'org.apache.http.impl.client.CloseableHttpClient',
'org.apache.http.impl.client.HttpClientBuilder',
'org.apache.http.util.EntityUtils'
]

View File

@ -1 +0,0 @@
2010d922191f5801939b462a5703ab79a7829626

View File

@ -0,0 +1 @@
46226778ec32b776e80f282c5bf65b88d36cc0a0

View File

@ -1 +0,0 @@
8b93f301823b79033fcbe873779b3d84f9730fc1

View File

@ -0,0 +1 @@
a38d544583e90cf163b2e45e4a57f5c54de670d3

View File

@ -1 +0,0 @@
14d88822bca655de7aa6ed3e4c498d115505710a

View File

@ -0,0 +1 @@
1f12816593c1422be957471c98a80bfbace60fa2

View File

@ -39,7 +39,9 @@ public class LongGCDisruption extends SingleNodeDisruption {
private static final Pattern[] unsafeClasses = new Pattern[]{
// logging has shared JVM locks - we may suspend a thread and block other nodes from doing their thing
Pattern.compile("logging\\.log4j")
Pattern.compile("logging\\.log4j"),
// security manager is shared across all nodes AND it uses synchronized hashmaps internally
Pattern.compile("java\\.lang\\.SecurityManager")
};
protected final String disruptedNode;