Merge branch 'master' into docs/add_console_to_highlighting
commit 611ece6127
@@ -517,7 +517,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]ConvertProcessor.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]GcNames.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]HotThreads.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
@@ -249,7 +249,7 @@ public class TransportClusterAllocationExplainAction
     protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
                                    final ActionListener<ClusterAllocationExplainResponse> listener) {
         final RoutingNodes routingNodes = state.getRoutingNodes();
-        final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
+        final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
                                                                    clusterInfoService.getClusterInfo(), System.nanoTime());
 
         ShardRouting foundShard = null;
@@ -54,10 +54,6 @@ import java.util.function.Predicate;
  */
 public class RoutingNodes implements Iterable<RoutingNode> {
 
-    private final MetaData metaData;
-
-    private final ClusterBlocks blocks;
-
     private final RoutingTable routingTable;
 
     private final Map<String, RoutingNode> nodesToShards = new HashMap<>();
@@ -66,8 +62,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
 
     private final Map<ShardId, List<ShardRouting>> assignedShards = new HashMap<>();
 
-    private final ImmutableOpenMap<String, ClusterState.Custom> customs;
-
     private final boolean readOnly;
 
     private int inactivePrimaryCount = 0;
@@ -85,10 +79,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
 
     public RoutingNodes(ClusterState clusterState, boolean readOnly) {
         this.readOnly = readOnly;
-        this.metaData = clusterState.metaData();
-        this.blocks = clusterState.blocks();
         this.routingTable = clusterState.routingTable();
-        this.customs = clusterState.customs();
 
         Map<String, LinkedHashMap<ShardId, ShardRouting>> nodesToShards = new HashMap<>();
         // fill in the nodeToShards with the "live" nodes
@@ -232,28 +223,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         return routingTable();
     }
 
-    public MetaData metaData() {
-        return this.metaData;
-    }
-
-    public MetaData getMetaData() {
-        return metaData();
-    }
-
-    public ClusterBlocks blocks() {
-        return this.blocks;
-    }
-
-    public ClusterBlocks getBlocks() {
-        return this.blocks;
-    }
-
-    public ImmutableOpenMap<String, ClusterState.Custom> customs() {
-        return this.customs;
-    }
-
-    public <T extends ClusterState.Custom> T custom(String type) { return (T) customs.get(type); }
-
     public UnassignedShards unassigned() {
         return this.unassignedShards;
     }
@@ -90,7 +90,7 @@ public class AllocationService extends AbstractComponent {
         RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
         // shuffle the unassigned nodes, just so we won't have things like poison failed shards
         routingNodes.unassigned().shuffle();
-        StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
+        StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo());
         boolean changed = applyStartedShards(routingNodes, startedShards);
         if (!changed) {
             return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
@@ -216,7 +216,7 @@ public class AllocationService extends AbstractComponent {
         RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
         // shuffle the unassigned nodes, just so we won't have things like poison failed shards
         routingNodes.unassigned().shuffle();
-        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo());
+        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo());
         boolean changed = false;
         // as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list
         List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);
@@ -266,7 +266,7 @@ public class AllocationService extends AbstractComponent {
         // we don't shuffle the unassigned shards here, to try and get as close as possible to
         // a consistent result of the effect the commands have on the routing
         // this allows systems to dry run the commands, see the resulting cluster state, and act on it
-        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
+        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
         // don't short circuit deciders, we want a full explanation
         allocation.debugDecision(true);
         // we ignore disable allocation, because commands are explicit
@@ -305,7 +305,7 @@ public class AllocationService extends AbstractComponent {
         RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
         // shuffle the unassigned nodes, just so we won't have things like poison failed shards
         routingNodes.unassigned().shuffle();
-        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
+        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
         allocation.debugDecision(debug);
         if (!reroute(allocation)) {
             return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
@@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;
 
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterInfo;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -57,8 +57,8 @@ public class FailedRerouteAllocation extends RoutingAllocation {
 
     private final List<FailedShard> failedShards;
 
-    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
-        super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
+    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
+        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
         this.failedShards = failedShards;
     }
 
@@ -20,12 +20,14 @@
 package org.elasticsearch.cluster.routing.allocation;
 
 import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.index.shard.ShardId;
 
 import java.util.HashMap;
@@ -118,8 +120,12 @@ public class RoutingAllocation {
 
     private final RoutingNodes routingNodes;
 
+    private final MetaData metaData;
+
     private final DiscoveryNodes nodes;
 
+    private final ImmutableOpenMap<String, ClusterState.Custom> customs;
+
     private final AllocationExplanation explanation = new AllocationExplanation();
 
     private final ClusterInfo clusterInfo;
@@ -139,13 +145,15 @@ public class RoutingAllocation {
      * Creates a new {@link RoutingAllocation}
      * @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations
      * @param routingNodes Routing nodes in the current cluster
-     * @param nodes TODO: Documentation
+     * @param clusterState cluster state before rerouting
      * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
      */
-    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, ClusterInfo clusterInfo, long currentNanoTime) {
+    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime) {
         this.deciders = deciders;
         this.routingNodes = routingNodes;
-        this.nodes = nodes;
+        this.metaData = clusterState.metaData();
+        this.nodes = clusterState.nodes();
+        this.customs = clusterState.customs();
         this.clusterInfo = clusterInfo;
         this.currentNanoTime = currentNanoTime;
     }
@@ -184,7 +192,7 @@ public class RoutingAllocation {
      * @return Metadata of routing nodes
      */
     public MetaData metaData() {
-        return routingNodes.metaData();
+        return metaData;
     }
 
     /**
@@ -199,6 +207,10 @@ public class RoutingAllocation {
         return clusterInfo;
     }
 
+    public <T extends ClusterState.Custom> T custom(String key) {
+        return (T)customs.get(key);
+    }
+
     /**
      * Get explanations of current routing
      * @return explanation of routing
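For orientation, a minimal sketch (not part of the diff, under the assumption that an AllocationDeciders and ClusterInfoService are at hand, as in AllocationService): callers now hand the whole ClusterState to RoutingAllocation in place of DiscoveryNodes, and metadata and customs are read through the allocation rather than through RoutingNodes.

    static RoutingAllocation newRoutingAllocation(AllocationDeciders deciders,
                                                  ClusterInfoService clusterInfoService,
                                                  ClusterState clusterState) {
        // RoutingNodes no longer carries metaData, blocks, or customs
        RoutingNodes routingNodes = new RoutingNodes(clusterState, false);
        // the full cluster state replaces the old DiscoveryNodes argument
        RoutingAllocation allocation = new RoutingAllocation(deciders, routingNodes,
                clusterState, clusterInfoService.getClusterInfo(), System.nanoTime());
        // deciders now use the new accessors, e.g.:
        MetaData metaData = allocation.metaData();
        SnapshotsInProgress snapshots = allocation.custom(SnapshotsInProgress.TYPE);
        return allocation;
    }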
@@ -20,7 +20,7 @@
 package org.elasticsearch.cluster.routing.allocation;
 
 import org.elasticsearch.cluster.ClusterInfo;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -35,8 +35,8 @@ public class StartedRerouteAllocation extends RoutingAllocation {
 
     private final List<? extends ShardRouting> startedShards;
 
-    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
-        super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
+    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
+        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
         this.startedShards = startedShards;
     }
 
@@ -47,4 +47,4 @@ public class StartedRerouteAllocation extends RoutingAllocation {
     public List<? extends ShardRouting> startedShards() {
         return startedShards;
     }
 }
@@ -225,7 +225,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             this.weight = weight;
             this.threshold = threshold;
             this.routingNodes = allocation.routingNodes();
-            metaData = routingNodes.metaData();
+            this.metaData = allocation.metaData();
             avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size();
             buildModelFromAssigned();
         }
@@ -137,7 +137,7 @@ public class EnableAllocationDecider extends AllocationDecider {
             return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation");
         }
 
-        Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings();
+        Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings();
         final Rebalance enable;
         if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
             enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);
@@ -105,7 +105,7 @@ public class FilterAllocationDecider extends AllocationDecider {
         Decision decision = shouldClusterFilter(node, allocation);
         if (decision != null) return decision;
 
-        decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation);
+        decision = shouldIndexFilter(allocation.metaData().getIndexSafe(shardRouting.index()), node, allocation);
         if (decision != null) return decision;
 
         return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
@@ -86,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
+        IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
         final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
         // Capture the limit here in case it changes during this method's
         // execution
@@ -125,7 +125,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 
     @Override
     public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
+        IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
         final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
         // Capture the limit here in case it changes during this method's
         // execution
@@ -98,7 +98,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
         if (!enableRelocation && shardRouting.primary()) {
             // Only primary shards are snapshotted
 
-            SnapshotsInProgress snapshotsInProgress = allocation.routingNodes().custom(SnapshotsInProgress.TYPE);
+            SnapshotsInProgress snapshotsInProgress = allocation.custom(SnapshotsInProgress.TYPE);
             if (snapshotsInProgress == null) {
                 // Snapshots are not running
                 return allocation.decision(Decision.YES, NAME, "no snapshots are currently running");
@@ -84,7 +84,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
     public boolean allocateUnassigned(RoutingAllocation allocation) {
         boolean changed = false;
         final RoutingNodes routingNodes = allocation.routingNodes();
-        final MetaData metaData = routingNodes.metaData();
+        final MetaData metaData = allocation.metaData();
 
         final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
         while (unassignedIterator.hasNext()) {
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.processor;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.ingest.core.AbstractProcessor;
+import org.elasticsearch.ingest.core.AbstractProcessorFactory;
+import org.elasticsearch.ingest.core.IngestDocument;
+import org.elasticsearch.ingest.core.ConfigurationUtils;
+
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Processor that sorts an array of items.
+ * Throws exception if the specified field is not an array.
+ */
+public final class SortProcessor extends AbstractProcessor {
+
+    public static final String TYPE = "sort";
+    public static final String FIELD = "field";
+    public static final String ORDER = "order";
+    public static final String DEFAULT_ORDER = "asc";
+
+    public enum SortOrder {
+        ASCENDING("asc"), DESCENDING("desc");
+
+        private final String direction;
+
+        SortOrder(String direction) {
+            this.direction = direction;
+        }
+
+        public String toString() {
+            return this.direction;
+        }
+
+        public static SortOrder fromString(String value) {
+            if (value == null) {
+                throw new IllegalArgumentException("Sort direction cannot be null");
+            }
+
+            if (value.equals(ASCENDING.toString())) {
+                return ASCENDING;
+            } else if (value.equals(DESCENDING.toString())) {
+                return DESCENDING;
+            }
+            throw new IllegalArgumentException("Sort direction [" + value + "] not recognized."
+                    + " Valid values are: [asc, desc]");
+        }
+    }
+
+    private final String field;
+    private final SortOrder order;
+
+    SortProcessor(String tag, String field, SortOrder order) {
+        super(tag);
+        this.field = field;
+        this.order = order;
+    }
+
+    String getField() {
+        return field;
+    }
+
+    SortOrder getOrder() {
+        return order;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public void execute(IngestDocument document) {
+        List<? extends Comparable> list = document.getFieldValue(field, List.class);
+
+        if (list == null) {
+            throw new IllegalArgumentException("field [" + field + "] is null, cannot sort.");
+        }
+
+        if (list.size() <= 1) {
+            return;
+        }
+
+        if (order.equals(SortOrder.ASCENDING)) {
+            Collections.sort(list);
+        } else {
+            Collections.sort(list, Collections.reverseOrder());
+        }
+
+        document.setFieldValue(field, list);
+    }
+
+    @Override
+    public String getType() {
+        return TYPE;
+    }
+
+    public final static class Factory extends AbstractProcessorFactory<SortProcessor> {
+
+        @Override
+        public SortProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
+            String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, FIELD);
+            try {
+                SortOrder direction = SortOrder.fromString(
+                        ConfigurationUtils.readStringProperty(
+                                TYPE,
+                                processorTag,
+                                config,
+                                ORDER,
+                                DEFAULT_ORDER));
+                return new SortProcessor(processorTag, field, direction);
+            } catch (IllegalArgumentException e) {
+                throw ConfigurationUtils.newConfigurationException(TYPE, processorTag, ORDER, e.getMessage());
+            }
+        }
+    }
+}
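A hedged usage sketch of the new processor (not from the commit): the Factory reads "field" and an optional "order" from the processor config, and execute() then sorts the list in place. The tag and field names below are made up, and ingestDocument is assumed to already hold a List under "tags".

    Map<String, Object> config = new HashMap<>();
    config.put(SortProcessor.FIELD, "tags");
    config.put(SortProcessor.ORDER, "desc");   // omit to fall back to the "asc" default
    SortProcessor processor = new SortProcessor.Factory().doCreate("sort-tags", config);
    processor.execute(ingestDocument);         // sorts the "tags" list and stores it back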
@@ -20,9 +20,11 @@
 package org.elasticsearch.monitor.jvm;
 
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector;
@@ -30,14 +32,13 @@ import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
 
 import static java.util.Collections.unmodifiableMap;
-import static org.elasticsearch.monitor.jvm.JvmStats.jvmStats;
 
-/**
- *
- */
 public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitorService> {
 
     private final ThreadPool threadPool;
@@ -119,12 +120,123 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
         return GC_COLLECTOR_PREFIX + key + "." + level;
     }
 
+    private static final String LOG_MESSAGE =
+        "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}";
+
     @Override
     protected void doStart() {
         if (!enabled) {
             return;
         }
-        scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(), interval);
+        scheduledFuture = threadPool.scheduleWithFixedDelay(new JvmMonitor(gcThresholds) {
+            @Override
+            void onMonitorFailure(Throwable t) {
+                logger.debug("failed to monitor", t);
+            }
+
+            @Override
+            void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
+                logSlowGc(logger, threshold, seq, slowGcEvent, JvmGcMonitorService::buildPools);
+            }
+        }, interval);
+    }
+
+    static void logSlowGc(
+        final ESLogger logger,
+        final JvmMonitor.Threshold threshold,
+        final long seq,
+        final JvmMonitor.SlowGcEvent slowGcEvent,
+        BiFunction<JvmStats, JvmStats, String> pools) {
+
+        final String name = slowGcEvent.currentGc.getName();
+        final long elapsed = slowGcEvent.elapsed;
+        final long totalGcCollectionCount = slowGcEvent.currentGc.getCollectionCount();
+        final long currentGcCollectionCount = slowGcEvent.collectionCount;
+        final TimeValue totalGcCollectionTime = slowGcEvent.currentGc.getCollectionTime();
+        final TimeValue currentGcCollectionTime = slowGcEvent.collectionTime;
+        final JvmStats lastJvmStats = slowGcEvent.lastJvmStats;
+        final JvmStats currentJvmStats = slowGcEvent.currentJvmStats;
+        final ByteSizeValue maxHeapUsed = slowGcEvent.maxHeapUsed;
+
+        switch (threshold) {
+            case WARN:
+                if (logger.isWarnEnabled()) {
+                    logger.warn(
+                        LOG_MESSAGE,
+                        name,
+                        seq,
+                        totalGcCollectionCount,
+                        currentGcCollectionTime,
+                        currentGcCollectionCount,
+                        TimeValue.timeValueMillis(elapsed),
+                        currentGcCollectionTime,
+                        totalGcCollectionTime,
+                        lastJvmStats.getMem().getHeapUsed(),
+                        currentJvmStats.getMem().getHeapUsed(),
+                        maxHeapUsed,
+                        pools.apply(lastJvmStats, currentJvmStats));
+                }
+                break;
+            case INFO:
+                if (logger.isInfoEnabled()) {
+                    logger.info(
+                        LOG_MESSAGE,
+                        name,
+                        seq,
+                        totalGcCollectionCount,
+                        currentGcCollectionTime,
+                        currentGcCollectionCount,
+                        TimeValue.timeValueMillis(elapsed),
+                        currentGcCollectionTime,
+                        totalGcCollectionTime,
+                        lastJvmStats.getMem().getHeapUsed(),
+                        currentJvmStats.getMem().getHeapUsed(),
+                        maxHeapUsed,
+                        pools.apply(lastJvmStats, currentJvmStats));
+                }
+                break;
+            case DEBUG:
+                if (logger.isDebugEnabled()) {
+                    logger.debug(
+                        LOG_MESSAGE,
+                        name,
+                        seq,
+                        totalGcCollectionCount,
+                        currentGcCollectionTime,
+                        currentGcCollectionCount,
+                        TimeValue.timeValueMillis(elapsed),
+                        currentGcCollectionTime,
+                        totalGcCollectionTime,
+                        lastJvmStats.getMem().getHeapUsed(),
+                        currentJvmStats.getMem().getHeapUsed(),
+                        maxHeapUsed,
+                        pools.apply(lastJvmStats, currentJvmStats));
+                }
+                break;
+        }
+    }
+
+    static String buildPools(JvmStats last, JvmStats current) {
+        StringBuilder sb = new StringBuilder();
+        for (JvmStats.MemoryPool currentPool : current.getMem()) {
+            JvmStats.MemoryPool prevPool = null;
+            for (JvmStats.MemoryPool pool : last.getMem()) {
+                if (pool.getName().equals(currentPool.getName())) {
+                    prevPool = pool;
+                    break;
+                }
+            }
+            sb.append("{[")
+                .append(currentPool.getName())
+                .append("] [")
+                .append(prevPool == null ? "?" : prevPool.getUsed())
+                .append("]->[")
+                .append(currentPool.getUsed())
+                .append("]/[")
+                .append(currentPool.getMax())
+                .append("]}");
+        }
+        return sb.toString();
     }
 
     @Override
@@ -139,12 +251,46 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
     protected void doClose() {
     }
 
-    private class JvmMonitor implements Runnable {
+    static abstract class JvmMonitor implements Runnable {
+
+        enum Threshold { DEBUG, INFO, WARN }
+
+        static class SlowGcEvent {
+
+            final GarbageCollector currentGc;
+            final long collectionCount;
+            final TimeValue collectionTime;
+            final long elapsed;
+            final JvmStats lastJvmStats;
+            final JvmStats currentJvmStats;
+            final ByteSizeValue maxHeapUsed;
+
+            public SlowGcEvent(
+                final GarbageCollector currentGc,
+                final long collectionCount,
+                final TimeValue collectionTime,
+                final long elapsed,
+                final JvmStats lastJvmStats,
+                final JvmStats currentJvmStats,
+                final ByteSizeValue maxHeapUsed) {
+                this.currentGc = currentGc;
+                this.collectionCount = collectionCount;
+                this.collectionTime = collectionTime;
+                this.elapsed = elapsed;
+                this.lastJvmStats = lastJvmStats;
+                this.currentJvmStats = currentJvmStats;
+                this.maxHeapUsed = maxHeapUsed;
+            }
+
+        }
 
+        private long lastTime = now();
         private JvmStats lastJvmStats = jvmStats();
         private long seq = 0;
+        private final Map<String, GcThreshold> gcThresholds;
 
-        public JvmMonitor() {
+        public JvmMonitor(Map<String, GcThreshold> gcThresholds) {
+            this.gcThresholds = Objects.requireNonNull(gcThresholds);
         }
 
         @Override
@@ -152,24 +298,28 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
             try {
                 monitorLongGc();
             } catch (Throwable t) {
-                logger.debug("failed to monitor", t);
+                onMonitorFailure(t);
             }
         }
 
-        private synchronized void monitorLongGc() {
+        abstract void onMonitorFailure(Throwable t);
+
+        synchronized void monitorLongGc() {
             seq++;
+            final long currentTime = now();
             JvmStats currentJvmStats = jvmStats();
 
+            final long elapsed = TimeUnit.NANOSECONDS.toMillis(currentTime - lastTime);
             for (int i = 0; i < currentJvmStats.getGc().getCollectors().length; i++) {
                 GarbageCollector gc = currentJvmStats.getGc().getCollectors()[i];
-                GarbageCollector prevGc = lastJvmStats.gc.collectors[i];
+                GarbageCollector prevGc = lastJvmStats.getGc().getCollectors()[i];
 
                 // no collection has happened
-                long collections = gc.collectionCount - prevGc.collectionCount;
+                long collections = gc.getCollectionCount() - prevGc.getCollectionCount();
                 if (collections == 0) {
                     continue;
                 }
-                long collectionTime = gc.collectionTime - prevGc.collectionTime;
+                long collectionTime = gc.getCollectionTime().millis() - prevGc.getCollectionTime().millis();
                 if (collectionTime == 0) {
                     continue;
                 }
|
@ -181,34 +331,39 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent<JvmGcMonitor
|
||||||
|
|
||||||
long avgCollectionTime = collectionTime / collections;
|
long avgCollectionTime = collectionTime / collections;
|
||||||
|
|
||||||
|
Threshold threshold = null;
|
||||||
if (avgCollectionTime > gcThreshold.warnThreshold) {
|
if (avgCollectionTime > gcThreshold.warnThreshold) {
|
||||||
logger.warn("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
|
threshold = Threshold.WARN;
|
||||||
gc.getName(), seq, gc.getCollectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.getTimestamp() - lastJvmStats.getTimestamp()), TimeValue.timeValueMillis(collectionTime), gc.getCollectionTime(), lastJvmStats.getMem().getHeapUsed(), currentJvmStats.getMem().getHeapUsed(), JvmInfo.jvmInfo().getMem().getHeapMax(), buildPools(lastJvmStats, currentJvmStats));
|
|
||||||
} else if (avgCollectionTime > gcThreshold.infoThreshold) {
|
} else if (avgCollectionTime > gcThreshold.infoThreshold) {
|
||||||
logger.info("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
|
threshold = Threshold.INFO;
|
||||||
gc.getName(), seq, gc.getCollectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.getTimestamp() - lastJvmStats.getTimestamp()), TimeValue.timeValueMillis(collectionTime), gc.getCollectionTime(), lastJvmStats.getMem().getHeapUsed(), currentJvmStats.getMem().getHeapUsed(), JvmInfo.jvmInfo().getMem().getHeapMax(), buildPools(lastJvmStats, currentJvmStats));
|
} else if (avgCollectionTime > gcThreshold.debugThreshold) {
|
||||||
} else if (avgCollectionTime > gcThreshold.debugThreshold && logger.isDebugEnabled()) {
|
threshold = Threshold.DEBUG;
|
||||||
logger.debug("[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
|
}
|
||||||
gc.getName(), seq, gc.getCollectionCount(), TimeValue.timeValueMillis(collectionTime), collections, TimeValue.timeValueMillis(currentJvmStats.getTimestamp() - lastJvmStats.getTimestamp()), TimeValue.timeValueMillis(collectionTime), gc.getCollectionTime(), lastJvmStats.getMem().getHeapUsed(), currentJvmStats.getMem().getHeapUsed(), JvmInfo.jvmInfo().getMem().getHeapMax(), buildPools(lastJvmStats, currentJvmStats));
|
if (threshold != null) {
|
||||||
|
onSlowGc(threshold, seq, new SlowGcEvent(
|
||||||
|
gc,
|
||||||
|
collections,
|
||||||
|
TimeValue.timeValueMillis(collectionTime),
|
||||||
|
elapsed,
|
||||||
|
lastJvmStats,
|
||||||
|
currentJvmStats,
|
||||||
|
JvmInfo.jvmInfo().getMem().getHeapMax()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
lastTime = currentTime;
|
||||||
lastJvmStats = currentJvmStats;
|
lastJvmStats = currentJvmStats;
|
||||||
}
|
}
|
||||||
|
|
||||||
private String buildPools(JvmStats prev, JvmStats current) {
|
JvmStats jvmStats() {
|
||||||
StringBuilder sb = new StringBuilder();
|
return JvmStats.jvmStats();
|
||||||
for (JvmStats.MemoryPool currentPool : current.getMem()) {
|
|
||||||
JvmStats.MemoryPool prevPool = null;
|
|
||||||
for (JvmStats.MemoryPool pool : prev.getMem()) {
|
|
||||||
if (pool.getName().equals(currentPool.getName())) {
|
|
||||||
prevPool = pool;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sb.append("{[").append(currentPool.getName())
|
|
||||||
.append("] [").append(prevPool == null ? "?" : prevPool.getUsed()).append("]->[").append(currentPool.getUsed()).append("]/[").append(currentPool.getMax()).append("]}");
|
|
||||||
}
|
|
||||||
return sb.toString();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
long now() {
|
||||||
|
return System.nanoTime();
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
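The point of the JvmMonitor refactoring is testability: the monitor is now a static abstract class with overridable onSlowGc/onMonitorFailure callbacks and now()/jvmStats() seams, so slow-GC detection can be driven without a ThreadPool. A minimal sketch under that assumption (gcThresholds is the service's Map<String, GcThreshold>):

    JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds) {
        @Override
        void onMonitorFailure(Throwable t) {
            // the production subclass logs this at debug level
        }

        @Override
        void onSlowGc(Threshold threshold, long seq, SlowGcEvent slowGcEvent) {
            // the production subclass forwards to logSlowGc(logger, ...); a test can assert here
        }
    };
    monitor.monitorLongGc();  // diffs current jvmStats() against the last sample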
@@ -37,6 +37,7 @@ import org.elasticsearch.ingest.processor.LowercaseProcessor;
 import org.elasticsearch.ingest.processor.RemoveProcessor;
 import org.elasticsearch.ingest.processor.RenameProcessor;
 import org.elasticsearch.ingest.processor.SetProcessor;
+import org.elasticsearch.ingest.processor.SortProcessor;
 import org.elasticsearch.ingest.processor.SplitProcessor;
 import org.elasticsearch.ingest.processor.TrimProcessor;
 import org.elasticsearch.ingest.processor.UppercaseProcessor;
@@ -78,6 +79,7 @@ public class NodeModule extends AbstractModule {
         registerProcessor(FailProcessor.TYPE, (templateService, registry) -> new FailProcessor.Factory(templateService));
         registerProcessor(ForEachProcessor.TYPE, (templateService, registry) -> new ForEachProcessor.Factory(registry));
         registerProcessor(DateIndexNameProcessor.TYPE, (templateService, registry) -> new DateIndexNameProcessor.Factory());
+        registerProcessor(SortProcessor.TYPE, (templateService, registry) -> new SortProcessor.Factory());
     }
 
     @Override
@@ -30,8 +30,8 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.internal.SearchContext;
-import org.elasticsearch.search.profile.CollectorResult;
-import org.elasticsearch.search.profile.InternalProfileCollector;
+import org.elasticsearch.search.profile.query.CollectorResult;
+import org.elasticsearch.search.profile.query.InternalProfileCollector;
 import org.elasticsearch.search.query.QueryPhaseExecutionException;
 
 import java.io.IOException;
@@ -125,7 +125,7 @@ public class AggregationPhase implements SearchPhase {
                     Collections.emptyList());
             collector = profileCollector;
             // start a new profile with this collector
-            context.getProfilers().addProfiler().setCollector(profileCollector);
+            context.getProfilers().addQueryProfiler().setCollector(profileCollector);
         }
         globalsCollector.preCollection();
         context.searcher().search(query, collector);
@@ -33,10 +33,10 @@ import org.apache.lucene.search.Weight;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.search.dfs.AggregatedDfs;
-import org.elasticsearch.search.profile.QueryProfileBreakdown;
-import org.elasticsearch.search.profile.ProfileWeight;
-import org.elasticsearch.search.profile.QueryProfiler;
-import org.elasticsearch.search.profile.QueryTimingType;
+import org.elasticsearch.search.profile.query.ProfileWeight;
+import org.elasticsearch.search.profile.query.QueryProfileBreakdown;
+import org.elasticsearch.search.profile.query.QueryProfiler;
+import org.elasticsearch.search.profile.query.QueryTimingType;
 
 import java.io.IOException;
 
@@ -43,7 +43,7 @@ import java.util.Map;
  * Each InternalProfileResult has a List of InternalProfileResults, which will contain
  * "children" queries if applicable
  */
-final class ProfileResult implements Writeable, ToXContent {
+public final class ProfileResult implements Writeable, ToXContent {
 
     private static final ParseField TYPE = new ParseField("type");
     private static final ParseField DESCRIPTION = new ParseField("description");
@@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.profile.query.CollectorResult;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -20,6 +20,7 @@
 package org.elasticsearch.search.profile;
 
 import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.profile.query.QueryProfiler;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -29,31 +30,31 @@ import java.util.List;
 public final class Profilers {
 
     private final ContextIndexSearcher searcher;
-    private final List<QueryProfiler> profilers;
+    private final List<QueryProfiler> queryProfilers;
 
     /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link QueryProfiler}. */
     public Profilers(ContextIndexSearcher searcher) {
         this.searcher = searcher;
-        this.profilers = new ArrayList<>();
-        addProfiler();
+        this.queryProfilers = new ArrayList<>();
+        addQueryProfiler();
     }
 
     /** Switch to a new profile. */
-    public QueryProfiler addProfiler() {
+    public QueryProfiler addQueryProfiler() {
         QueryProfiler profiler = new QueryProfiler();
         searcher.setProfiler(profiler);
-        profilers.add(profiler);
+        queryProfilers.add(profiler);
         return profiler;
     }
 
     /** Get the current profiler. */
-    public QueryProfiler getCurrent() {
-        return profilers.get(profilers.size() - 1);
+    public QueryProfiler getCurrentQueryProfiler() {
+        return queryProfilers.get(queryProfilers.size() - 1);
     }
 
     /** Return the list of all created {@link QueryProfiler}s so far. */
-    public List<QueryProfiler> getProfilers() {
-        return Collections.unmodifiableList(profilers);
+    public List<QueryProfiler> getQueryProfilers() {
+        return Collections.unmodifiableList(queryProfilers);
     }
 
 }
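The Profilers renames are mechanical; a sketch of a caller after this change (searcher is an assumed ContextIndexSearcher), with the old names noted inline:

    Profilers profilers = new Profilers(searcher);               // starts with one QueryProfiler
    QueryProfiler current = profilers.getCurrentQueryProfiler(); // was getCurrent()
    QueryProfiler fresh = profilers.addQueryProfiler();          // was addProfiler()
    List<QueryProfiler> all = profilers.getQueryProfilers();     // was getProfilers()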
@@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.profile.query.QueryProfiler;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.profile;
+package org.elasticsearch.search.profile.query;
 
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.profile;
+package org.elasticsearch.search.profile.query;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Collector;
@@ -17,9 +17,10 @@
  * under the License.
  */
 
-package org.elasticsearch.search.profile;
+package org.elasticsearch.search.profile.query;
 
 import org.apache.lucene.search.Query;
+import org.elasticsearch.search.profile.ProfileResult;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.profile;
+package org.elasticsearch.search.profile.query;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Collector;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.profile;
+package org.elasticsearch.search.profile.query;
 
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Scorer;
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.search.profile;
+package org.elasticsearch.search.profile.query;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
@ -17,7 +17,9 @@
|
||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.elasticsearch.search.profile;
|
package org.elasticsearch.search.profile.query;
|
||||||
|
|
||||||
|
import org.elasticsearch.search.profile.AbstractProfileBreakdown;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A record of timings for the various operations that may happen during query execution.
|
* A record of timings for the various operations that may happen during query execution.
|
|
@ -17,9 +17,10 @@
|
||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.elasticsearch.search.profile;
|
package org.elasticsearch.search.profile.query;
|
||||||
|
|
||||||
import org.apache.lucene.search.Query;
|
import org.apache.lucene.search.Query;
|
||||||
|
import org.elasticsearch.search.profile.ProfileResult;
|
||||||
|
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
|
@ -17,7 +17,7 @@
|
||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.elasticsearch.search.profile;
|
package org.elasticsearch.search.profile.query;
|
||||||
|
|
||||||
import java.util.Locale;
|
import java.util.Locale;
|
||||||
|
|
|
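The run of @ -17 hunks above moves the query-profiling classes from org.elasticsearch.search.profile into the new org.elasticsearch.search.profile.query subpackage; shared types such as ProfileResult and AbstractProfileBreakdown stay in the parent package and are now imported explicitly. Call sites would adjust their imports along these lines (a sketch, not an exhaustive list):

    import org.elasticsearch.search.profile.ProfileResult;       // shared types stay in the parent package
    import org.elasticsearch.search.profile.query.QueryProfiler; // query-specific types move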
@ -52,10 +52,10 @@ import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.profile.CollectorResult;
import org.elasticsearch.search.profile.InternalProfileCollector;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.query.CollectorResult;
import org.elasticsearch.search.profile.query.InternalProfileCollector;
import org.elasticsearch.search.rescore.RescorePhase;
import org.elasticsearch.search.rescore.RescorePhase;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.sort.SortAndFormats;

@ -113,7 +113,7 @@ public class QueryPhase implements SearchPhase {

if (searchContext.getProfilers() != null) {
if (searchContext.getProfilers() != null) {
List<ProfileShardResult> shardResults = SearchProfileShardResults
List<ProfileShardResult> shardResults = SearchProfileShardResults
.buildShardResults(searchContext.getProfilers().getProfilers());
.buildShardResults(searchContext.getProfilers().getQueryProfilers());
searchContext.queryResult().profileResults(shardResults);
searchContext.queryResult().profileResults(shardResults);
}
}
}
}

@ -365,7 +365,7 @@ public class QueryPhase implements SearchPhase {
try {
try {
if (collector != null) {
if (collector != null) {
if (doProfile) {
if (doProfile) {
searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector);
searchContext.getProfilers().getCurrentQueryProfiler().setCollector((InternalProfileCollector) collector);
}
}
searcher.search(query, collector);
searcher.search(query, collector);
}
}

@ -386,7 +386,7 @@ public class QueryPhase implements SearchPhase {

if (searchContext.getProfilers() != null) {
if (searchContext.getProfilers() != null) {
List<ProfileShardResult> shardResults = SearchProfileShardResults
List<ProfileShardResult> shardResults = SearchProfileShardResults
.buildShardResults(searchContext.getProfilers().getProfilers());
.buildShardResults(searchContext.getProfilers().getQueryProfilers());
searchContext.queryResult().profileResults(shardResults);
searchContext.queryResult().profileResults(shardResults);
}
}
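Within QueryPhase the profiling flow keeps its shape and only picks up the renamed accessors: the current query profiler receives the wrapped collector, and shard results are built from the full profiler list. Condensed from the hunks above into one sequence (a sketch, not the literal method body):

    if (searchContext.getProfilers() != null) {
        searchContext.getProfilers().getCurrentQueryProfiler().setCollector((InternalProfileCollector) collector);
        List<ProfileShardResult> shardResults =
            SearchProfileShardResults.buildShardResults(searchContext.getProfilers().getQueryProfilers());
        searchContext.queryResult().profileResults(shardResults);
    }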
@ -96,7 +96,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase {

for (Client client : clients()) {
for (Client client : clients()) {
ClusterState clusterState = getLocalClusterState(client);
ClusterState clusterState = getLocalClusterState(client);
assertThat(clusterState.getRoutingNodes().metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
assertThat(clusterState.metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
@ -875,7 +875,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
)
)
);
);
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo,
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo,
System.nanoTime());
System.nanoTime());
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.NO));
assertThat(decision.type(), equalTo(Decision.Type.NO));

@ -896,7 +896,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
)
)
);
);
clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo, System.nanoTime());
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));
assertThat(decision.type(), equalTo(Decision.Type.YES));

@ -991,7 +991,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
)
)
);
);
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo,
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo,
System.nanoTime());
System.nanoTime());
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);

@ -1051,7 +1051,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
);
);

clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build();
clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build();
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo, System.nanoTime());
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));
assertThat(decision.type(), equalTo(Decision.Type.YES));
@ -136,7 +136,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
shardSizes.put("[test][0][p]", 10L); // 10 bytes
shardSizes.put("[test][0][p]", 10L); // 10 bytes
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of());
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState.nodes(), clusterInfo, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
assertEquals(mostAvailableUsage.toString(), Decision.YES, decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(mostAvailableUsage.toString(), Decision.YES, decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(mostAvailableUsage.toString(), Decision.NO, decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation));
assertEquals(mostAvailableUsage.toString(), Decision.NO, decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation));
}
}

@ -204,7 +204,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
shardSizes.put("[test][2][p]", 10L);
shardSizes.put("[test][2][p]", 10L);

final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build());
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState.nodes(), clusterInfo, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{decider}), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
assertEquals(Decision.YES, decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(Decision.YES, decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation));
assertEquals(Decision.NO, decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation));
assertEquals(Decision.NO, decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation));
try {
try {
@ -346,7 +346,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.metaData(metaData)
.routingTable(routingTable)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime());
}
}

/**
/**

@ -425,7 +425,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.metaData(metaData)
.routingTable(routingTable)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime());
}
}

/**
/**

@ -444,7 +444,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.routingTable(routingTable)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();

RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
boolean changed = testAllocator.allocateUnassigned(allocation);
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));

@ -452,7 +452,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas

testAllocator.addData(node1, 1, null, randomBoolean());
testAllocator.addData(node1, 1, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));

@ -460,7 +460,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas

testAllocator.addData(node2, 1, null, randomBoolean());
testAllocator.addData(node2, 1, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));

@ -485,7 +485,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.routingTable(routingTable)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();

RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
boolean changed = testAllocator.allocateUnassigned(allocation);
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));

@ -493,7 +493,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas

testAllocator.addData(node1, 1, null, randomBoolean());
testAllocator.addData(node1, 1, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));

@ -501,7 +501,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas

testAllocator.addData(node2, 2, null, randomBoolean());
testAllocator.addData(node2, 2, null, randomBoolean());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation);
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0));

@ -525,7 +525,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.metaData(metaData)
.routingTable(routingTableBuilder.build())
.routingTable(routingTableBuilder.build())
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime());
}
}

class TestAllocator extends PrimaryShardAllocator {
class TestAllocator extends PrimaryShardAllocator {
@ -302,7 +302,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.metaData(metaData)
.routingTable(routingTable)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), ClusterInfo.EMPTY, System.nanoTime());
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime());
}
}

private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {
private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {

@ -324,7 +324,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
.metaData(metaData)
.metaData(metaData)
.routingTable(routingTable)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), ClusterInfo.EMPTY, System.nanoTime());
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime());
}
}

class TestAllocator extends ReplicaShardAllocator {
class TestAllocator extends ReplicaShardAllocator {
@ -103,7 +103,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
.nodes(DiscoveryNodes.EMPTY_NODES)
.nodes(DiscoveryNodes.EMPTY_NODES)
.build(), false
.build(), false
);
);
RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current.nodes(), ClusterInfo.EMPTY, System.nanoTime());
RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current, ClusterInfo.EMPTY, System.nanoTime());
allocator.allocateUnassigned(routingAllocation);
allocator.allocateUnassigned(routingAllocation);
}
}
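The common thread in these test hunks is the RoutingAllocation constructor: it now receives the whole ClusterState where it previously received only the DiscoveryNodes, and, as the AckClusterUpdateSettingsIT hunk shows, metadata is now read straight off the ClusterState rather than through RoutingNodes. The before/after call shape, with placeholder arguments:

    // before: only the node list was available to the allocation
    new RoutingAllocation(deciders, routingNodes, clusterState.nodes(), clusterInfo, System.nanoTime());
    // after: the full cluster state is passed through
    new RoutingAllocation(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());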
@ -0,0 +1,282 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.ingest.processor;

import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.processor.SortProcessor.SortOrder;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;

public class SortProcessorTests extends ESTestCase {

    public void testSortStrings() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<String> fieldValue = new ArrayList<>(numItems);
        List<String> expectedResult = new ArrayList<>(numItems);
        for (int j = 0; j < numItems; j++) {
            String value = randomAsciiOfLengthBetween(1, 10);
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortIntegersNonRandom() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());

        Integer[] expectedResult = new Integer[]{1,2,3,4,5,10,20,21,22,50,100};
        List<Integer> fieldValue = new ArrayList<>(expectedResult.length);
        fieldValue.addAll(Arrays.asList(expectedResult).subList(0, expectedResult.length));
        Collections.shuffle(fieldValue, random());

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, SortOrder.ASCENDING);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.getFieldValue(fieldName, List.class).toArray(), equalTo(expectedResult));
    }

    public void testSortIntegers() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<Integer> fieldValue = new ArrayList<>(numItems);
        List<Integer> expectedResult = new ArrayList<>(numItems);
        for (int j = 0; j < numItems; j++) {
            Integer value = randomIntBetween(1, 100);
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortShorts() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<Short> fieldValue = new ArrayList<>(numItems);
        List<Short> expectedResult = new ArrayList<>(numItems);
        for (int j = 0; j < numItems; j++) {
            Short value = randomShort();
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortDoubles() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<Double> fieldValue = new ArrayList<>(numItems);
        List<Double> expectedResult = new ArrayList<>(numItems);
        for (int j = 0; j < numItems; j++) {
            Double value = randomDoubleBetween(0.0, 100.0, true);
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortFloats() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<Float> fieldValue = new ArrayList<>(numItems);
        List<Float> expectedResult = new ArrayList<>(numItems);
        for (int j = 0; j < numItems; j++) {
            Float value = randomFloat();
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortBytes() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<Byte> fieldValue = new ArrayList<>(numItems);
        List<Byte> expectedResult = new ArrayList<>(numItems);
        for (int j = 0; j < numItems; j++) {
            Byte value = randomByte();
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortBooleans() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<Boolean> fieldValue = new ArrayList<>(numItems);
        List<Boolean> expectedResult = new ArrayList<>(numItems);
        for (int j = 0; j < numItems; j++) {
            Boolean value = randomBoolean();
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortMixedStrings() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        int numItems = randomIntBetween(1, 10);
        List<String> fieldValue = new ArrayList<>(numItems);
        List<String> expectedResult = new ArrayList<>(numItems);
        String value;
        for (int j = 0; j < numItems; j++) {
            if (randomBoolean()) {
                value = String.valueOf(randomIntBetween(0, 100));
            } else {
                value = randomAsciiOfLengthBetween(1, 10);
            }
            fieldValue.add(value);
            expectedResult.add(value);
        }
        Collections.sort(expectedResult);

        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        if (order.equals(SortOrder.DESCENDING)) {
            Collections.reverse(expectedResult);
        }

        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        processor.execute(ingestDocument);
        assertEquals(ingestDocument.getFieldValue(fieldName, List.class), expectedResult);
    }

    public void testSortNonListField() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
        String fieldName = RandomDocumentPicks.randomFieldName(random());
        ingestDocument.setFieldValue(fieldName, randomAsciiOfLengthBetween(1, 10));
        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        try {
            processor.execute(ingestDocument);
        } catch(IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("field [" + fieldName + "] of type [java.lang.String] cannot be cast to [java.util.List]"));
        }
    }

    public void testSortNonExistingField() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
        String fieldName = RandomDocumentPicks.randomFieldName(random());
        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        Processor processor = new SortProcessor(randomAsciiOfLength(10), fieldName, order);
        try {
            processor.execute(ingestDocument);
        } catch(IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("not present as part of path [" + fieldName + "]"));
        }
    }

    public void testSortNullValue() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null));
        SortOrder order = randomBoolean() ? SortOrder.ASCENDING : SortOrder.DESCENDING;
        Processor processor = new SortProcessor(randomAsciiOfLength(10), "field", order);
        try {
            processor.execute(ingestDocument);
        } catch(IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("field [field] is null, cannot sort."));
        }
    }

}
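Every positive test in SortProcessorTests follows the same template: generate a random list, sort a copy (reversed for DESCENDING) as the expectation, attach the list to a random field, and run the processor. Boiled down to the essentials (a sketch; the tag and field names are arbitrary and doc stands in for a prepared IngestDocument):

    List<Integer> values = new ArrayList<>(Arrays.asList(3, 1, 2));
    List<Integer> expected = new ArrayList<>(values);
    Collections.sort(expected);                                   // ASCENDING case
    Processor processor = new SortProcessor("tag", "field", SortOrder.ASCENDING);
    processor.execute(doc);                                       // doc's "field" now equals expected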
@ -0,0 +1,136 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.monitor.jvm;

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

public class JvmGcMonitorServiceTests extends ESTestCase {

    public void testSlowGcLogging() {
        final ESLogger logger = mock(ESLogger.class);
        when(logger.isWarnEnabled()).thenReturn(true);
        when(logger.isInfoEnabled()).thenReturn(true);
        when(logger.isDebugEnabled()).thenReturn(true);
        final JvmGcMonitorService.JvmMonitor.Threshold threshold = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
        final String name = randomAsciiOfLength(16);
        final long seq = randomIntBetween(1, 1 << 30);
        final int elapsedValue = randomIntBetween(1, 1 << 10);
        final long totalCollectionCount = randomIntBetween(1, 16);
        final long currentCollectionCount = randomIntBetween(1, 16);
        final TimeValue totalCollectionTime = TimeValue.timeValueMillis(randomIntBetween(1, elapsedValue));
        final TimeValue currentCollectionTime = TimeValue.timeValueMillis(randomIntBetween(1, elapsedValue));

        final ByteSizeValue lastHeapUsed = new ByteSizeValue(randomIntBetween(1, 1 << 10));
        JvmStats lastJvmStats = mock(JvmStats.class);
        JvmStats.Mem lastMem = mock(JvmStats.Mem.class);
        when(lastMem.getHeapUsed()).thenReturn(lastHeapUsed);
        when(lastJvmStats.getMem()).thenReturn(lastMem);
        when(lastJvmStats.toString()).thenReturn("last");

        final ByteSizeValue currentHeapUsed = new ByteSizeValue(randomIntBetween(1, 1 << 10));
        JvmStats currentJvmStats = mock(JvmStats.class);
        JvmStats.Mem currentMem = mock(JvmStats.Mem.class);
        when(currentMem.getHeapUsed()).thenReturn(currentHeapUsed);
        when(currentJvmStats.getMem()).thenReturn(currentMem);
        when(currentJvmStats.toString()).thenReturn("current");

        JvmStats.GarbageCollector gc = mock(JvmStats.GarbageCollector.class);
        when(gc.getName()).thenReturn(name);
        when(gc.getCollectionCount()).thenReturn(totalCollectionCount);
        when(gc.getCollectionTime()).thenReturn(totalCollectionTime);

        final ByteSizeValue maxHeapUsed = new ByteSizeValue(Math.max(lastHeapUsed.bytes(), currentHeapUsed.bytes()) + 1 << 10);

        JvmGcMonitorService.JvmMonitor.SlowGcEvent slowGcEvent = new JvmGcMonitorService.JvmMonitor.SlowGcEvent(
            gc,
            currentCollectionCount,
            currentCollectionTime,
            elapsedValue,
            lastJvmStats,
            currentJvmStats,
            maxHeapUsed);

        JvmGcMonitorService.logSlowGc(logger, threshold, seq, slowGcEvent, (l, c) -> l.toString() + ", " + c.toString());

        switch (threshold) {
            case WARN:
                verify(logger).isWarnEnabled();
                verify(logger).warn(
                    "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
                    name,
                    seq,
                    totalCollectionCount,
                    currentCollectionTime,
                    currentCollectionCount,
                    TimeValue.timeValueMillis(elapsedValue),
                    currentCollectionTime,
                    totalCollectionTime,
                    lastHeapUsed,
                    currentHeapUsed,
                    maxHeapUsed,
                    "last, current");
                break;
            case INFO:
                verify(logger).isInfoEnabled();
                verify(logger).info(
                    "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
                    name,
                    seq,
                    totalCollectionCount,
                    currentCollectionTime,
                    currentCollectionCount,
                    TimeValue.timeValueMillis(elapsedValue),
                    currentCollectionTime,
                    totalCollectionTime,
                    lastHeapUsed,
                    currentHeapUsed,
                    maxHeapUsed,
                    "last, current");
                break;
            case DEBUG:
                verify(logger).isDebugEnabled();
                verify(logger).debug(
                    "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
                    name,
                    seq,
                    totalCollectionCount,
                    currentCollectionTime,
                    currentCollectionCount,
                    TimeValue.timeValueMillis(elapsedValue),
                    currentCollectionTime,
                    totalCollectionTime,
                    lastHeapUsed,
                    currentHeapUsed,
                    maxHeapUsed,
                    "last, current");
                break;
        }
        verifyNoMoreInteractions(logger);
    }

}
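The test pins down logSlowGc's contract: a single parameterized log template, emitted at exactly one level chosen by the threshold, with no other logger interactions (verifyNoMoreInteractions). Presumably the production method reduces to a dispatch of this shape (a sketch under that assumption; TEMPLATE and args abbreviate the literal template string and the twelve arguments verified above):

    switch (threshold) {
        case WARN:  logger.warn(TEMPLATE, args);  break;
        case INFO:  logger.info(TEMPLATE, args);  break;
        case DEBUG: logger.debug(TEMPLATE, args); break;
    }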
@ -0,0 +1,248 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.monitor.jvm;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.hasToString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class JvmMonitorTests extends ESTestCase {

    public void testMonitorFailure() {
        AtomicBoolean shouldFail = new AtomicBoolean();
        AtomicBoolean invoked = new AtomicBoolean();
        JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap()) {
            @Override
            void onMonitorFailure(Throwable t) {
                invoked.set(true);
                assertThat(t, instanceOf(RuntimeException.class));
                assertThat(t, hasToString(containsString("simulated")));
            }

            @Override
            synchronized void monitorLongGc() {
                if (shouldFail.get()) {
                    throw new RuntimeException("simulated");
                }
            }

            @Override
            void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
            }
        };

        monitor.run();
        assertFalse(invoked.get());

        shouldFail.set(true);
        monitor.run();
        assertTrue(invoked.get());
    }

    public void testSlowGc() {

        final int initialYoungCollectionCount = randomIntBetween(1, 4);
        final int initialYoungCollectionTime = randomIntBetween(initialYoungCollectionCount * 100, initialYoungCollectionCount * 200);

        final int initialOldCollectionCount = randomIntBetween(1, 4);
        final int initialOldCollectionTime = randomIntBetween(initialYoungCollectionCount * 1000, initialYoungCollectionCount * 2000);

        final JvmStats.GarbageCollector initialYoungCollector = mock(JvmStats.GarbageCollector.class);
        when(initialYoungCollector.getName()).thenReturn("young");
        when(initialYoungCollector.getCollectionCount()).thenReturn((long) initialYoungCollectionCount);
        when(initialYoungCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialYoungCollectionTime));

        final JvmStats.GarbageCollector initialOldCollector = mock(JvmStats.GarbageCollector.class);
        when(initialOldCollector.getName()).thenReturn("old");
        when(initialOldCollector.getCollectionCount()).thenReturn((long) initialOldCollectionCount);
        when(initialOldCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialOldCollectionTime));
        JvmStats initialJvmStats = jvmStats(initialYoungCollector, initialOldCollector);

        final Map<String, JvmGcMonitorService.GcThreshold> gcThresholds = new HashMap<>();

        // fake debug threshold, info will be double this and warn will
        // be triple
        final int youngDebugThreshold = randomIntBetween(1, 10) * 100;
        gcThresholds.put(
            "young",
            new JvmGcMonitorService.GcThreshold("young", youngDebugThreshold * 3, youngDebugThreshold * 2, youngDebugThreshold));

        final boolean youngGcThreshold = randomBoolean();
        final JvmGcMonitorService.JvmMonitor.Threshold youngThresholdLevel = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
        final int youngMultiplier = 1 + youngThresholdLevel.ordinal();
        final int youngCollections = randomIntBetween(1, 4);

        final JvmStats.GarbageCollector youngCollector;
        youngCollector = mock(JvmStats.GarbageCollector.class);
        when(youngCollector.getName()).thenReturn("young");
        when(youngCollector.getCollectionCount()).thenReturn((long) (initialYoungCollectionCount + youngCollections));

        final int youngIncrement;
        if (youngGcThreshold) {
            // we are faking that youngCollections collections occurred
            // this number is chosen so that we squeak over the
            // random threshold when computing the average collection
            // time: note that average collection time will just be
            // youngMultiplier * youngDebugThreshold + 1 which ensures
            // that we are over the right threshold but below the next
            // threshold
            youngIncrement = youngCollections * youngMultiplier * youngDebugThreshold + youngCollections;
        } else {
            // fake that we did not exceed the threshold
            youngIncrement = randomIntBetween(1, youngDebugThreshold);
        }
        when(youngCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialYoungCollectionTime + youngIncrement));

        // fake debug threshold, info will be double this and warn will
        // be triple
        final int oldDebugThreshold = randomIntBetween(1, 10) * 100;
        gcThresholds.put(
            "old",
            new JvmGcMonitorService.GcThreshold("old", oldDebugThreshold * 3, oldDebugThreshold * 2, oldDebugThreshold));

        final boolean oldGcThreshold = randomBoolean();
        final JvmGcMonitorService.JvmMonitor.Threshold oldThresholdLevel = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
        final int oldMultiplier = 1 + oldThresholdLevel.ordinal();
        final int oldCollections = randomIntBetween(1, 4);

        final JvmStats.GarbageCollector oldCollector = mock(JvmStats.GarbageCollector.class);
        when(oldCollector.getName()).thenReturn("old");
        when(oldCollector.getCollectionCount()).thenReturn((long) (initialOldCollectionCount + oldCollections));
        final int oldIncrement;
        if (oldGcThreshold) {
            // we are faking that oldCollections collections occurred
            // this number is chosen so that we squeak over the
            // random threshold when computing the average collection
            // time: note that average collection time will just be
            // oldMultiplier * oldDebugThreshold + 1 which ensures
            // that we are over the right threshold but below the next
            // threshold
            oldIncrement = oldCollections * oldMultiplier * oldDebugThreshold + oldCollections;
        } else {
            // fake that we did not exceed the threshold
            oldIncrement = randomIntBetween(1, oldDebugThreshold);
        }
        when(oldCollector.getCollectionTime()).thenReturn(TimeValue.timeValueMillis(initialOldCollectionTime + oldIncrement));

        final long start = randomIntBetween(1, 1 << 30);
        final long expectedElapsed = randomIntBetween(1, 1000);
        final AtomicLong now = new AtomicLong(start);

        final AtomicReference<JvmStats> jvmStats = new AtomicReference<>();
        jvmStats.set(initialJvmStats);

        final AtomicInteger count = new AtomicInteger();

        JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds) {
            @Override
            void onMonitorFailure(Throwable t) {
            }

            @Override
            void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowGcEvent) {
                count.incrementAndGet();
                assertThat(seq, equalTo(1L));
                assertThat(slowGcEvent.elapsed, equalTo(expectedElapsed));
                assertThat(slowGcEvent.currentGc.getName(), anyOf(equalTo("young"), equalTo("old")));
                if ("young".equals(slowGcEvent.currentGc.getName())) {
                    assertCollection(
                        threshold,
                        youngThresholdLevel,
                        slowGcEvent,
                        initialYoungCollectionCount,
                        youngCollections,
                        initialYoungCollectionTime,
                        youngIncrement);
                } else if ("old".equals(slowGcEvent.currentGc.getName())) {
                    assertCollection(
                        threshold,
                        oldThresholdLevel,
                        slowGcEvent,
                        initialOldCollectionCount,
                        oldCollections,
                        initialOldCollectionTime,
                        oldIncrement);
                }
            }

            @Override
            long now() {
                return now.get();
            }

            @Override
            JvmStats jvmStats() {
                return jvmStats.get();
            }
        };

        final JvmStats monitorJvmStats = jvmStats(youngCollector, oldCollector);

        now.set(start + TimeUnit.NANOSECONDS.convert(expectedElapsed, TimeUnit.MILLISECONDS));
        jvmStats.set(monitorJvmStats);
        monitor.monitorLongGc();

        assertThat(count.get(), equalTo((youngGcThreshold ? 1 : 0) + (oldGcThreshold ? 1 : 0)));
    }

    private void assertCollection(
        final JvmGcMonitorService.JvmMonitor.Threshold actualThreshold,
        final JvmGcMonitorService.JvmMonitor.Threshold expectedThreshold,
        final JvmGcMonitorService.JvmMonitor.SlowGcEvent slowGcEvent,
        final int initialCollectionCount,
        final int collections,
        final int initialCollectionTime,
        final int increment) {
        assertThat(actualThreshold, equalTo(expectedThreshold));
        assertThat(slowGcEvent.currentGc.getCollectionCount(), equalTo((long) (initialCollectionCount + collections)));
        assertThat(slowGcEvent.collectionCount, equalTo((long) collections));
        assertThat(slowGcEvent.collectionTime, equalTo(TimeValue.timeValueMillis(increment)));
        assertThat(slowGcEvent.currentGc.getCollectionTime(), equalTo(TimeValue.timeValueMillis(initialCollectionTime + increment)));
    }

    private JvmStats jvmStats(JvmStats.GarbageCollector youngCollector, JvmStats.GarbageCollector oldCollector) {
        final JvmStats jvmStats = mock(JvmStats.class);
        final JvmStats.GarbageCollectors initialGcs = mock(JvmStats.GarbageCollectors.class);
        final JvmStats.GarbageCollector[] initialCollectors = new JvmStats.GarbageCollector[2];
        initialCollectors[0] = youngCollector;
        initialCollectors[1] = oldCollector;
        when(initialGcs.getCollectors()).thenReturn(initialCollectors);
        when(jvmStats.getGc()).thenReturn(initialGcs);
        when(jvmStats.getMem()).thenReturn(JvmStats.jvmStats().getMem());
        return jvmStats;
    }

}
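The increment arithmetic in testSlowGc is easier to see with concrete numbers. Take debugThreshold = 300 ms, multiplier = 2 (the level one step above debug), and collections = 4: the increment is 4 * 2 * 300 + 4 = 2404 ms, so the average per collection is 2404 / 4 = 601 ms, one millisecond over the 2x threshold (600 ms) and well under the 3x threshold (900 ms), exactly as the in-line comments promise:

    // increment = collections * multiplier * debugThreshold + collections
    // average   = increment / collections = multiplier * debugThreshold + 1
    //             e.g. (4 * 2 * 300 + 4) / 4 = 601 ms, between 600 ms and 900 ms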
@ -17,7 +17,7 @@
* under the License.
* under the License.
*/
*/

package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.Store;

@ -37,6 +37,9 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.profile.ProfileResult;
import org.elasticsearch.search.profile.query.QueryProfiler;
import org.elasticsearch.search.profile.query.QueryTimingType;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.BeforeClass;

@ -17,7 +17,7 @@
* under the License.
* under the License.
*/
*/

package org.elasticsearch.search.profile;
package org.elasticsearch.search.profile.query;

import org.apache.lucene.util.English;
import org.apache.lucene.util.English;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexRequestBuilder;

@ -29,6 +29,9 @@ import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.profile.ProfileResult;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.query.CollectorResult;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase;

@ -36,7 +39,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.List;
import java.util.Map;
import java.util.Map;

import static org.elasticsearch.search.profile.RandomQueryGenerator.randomQueryBuilder;
import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder;
import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThan;

@ -160,7 +163,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));
nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));
}
}

assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" + vanillaResponse.getHits().totalHits() + "]",
assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits ["
|
||||||
|
+ vanillaResponse.getHits().totalHits() + "]",
|
||||||
vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
|
vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
|
||||||
|
|
||||||
SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
|
SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
|
||||||
|
@ -237,7 +241,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
|
||||||
|
|
||||||
indexRandom(true, docs);
|
indexRandom(true, docs);
|
||||||
|
|
||||||
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")).must(QueryBuilders.matchQuery("field1", "two"));
|
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))
|
||||||
|
.must(QueryBuilders.matchQuery("field1", "two"));
|
||||||
|
|
||||||
SearchResponse resp = client().prepareSearch()
|
SearchResponse resp = client().prepareSearch()
|
||||||
.setQuery(q)
|
.setQuery(q)
|
||||||
|
@ -355,7 +360,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
|
||||||
|
|
||||||
refresh();
|
refresh();
|
||||||
|
|
||||||
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
|
QueryBuilder q = QueryBuilders.boolQuery()
|
||||||
|
.must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
|
||||||
|
|
||||||
logger.info("Query: {}", q);
|
logger.info("Query: {}", q);
|
||||||
|
|
|
@ -17,7 +17,7 @@
|
||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.elasticsearch.search.profile;
|
package org.elasticsearch.search.profile.query;
|
||||||
|
|
||||||
import org.apache.lucene.util.English;
|
import org.apache.lucene.util.English;
|
||||||
import org.elasticsearch.common.unit.Fuzziness;
|
import org.elasticsearch.common.unit.Fuzziness;
|
|
@ -33,7 +33,10 @@ That will return something like this:
|
||||||
"batches": 1,
|
"batches": 1,
|
||||||
"version_conflicts": 0,
|
"version_conflicts": 0,
|
||||||
"noops": 0,
|
"noops": 0,
|
||||||
"retries": 0,
|
"retries": {
|
||||||
|
"bulk": 0,
|
||||||
|
"search": 0
|
||||||
|
},
|
||||||
"throttled_millis": 0,
|
"throttled_millis": 0,
|
||||||
"requests_per_second": "unlimited",
|
"requests_per_second": "unlimited",
|
||||||
"throttled_until_millis": 0,
|
"throttled_until_millis": 0,
|
||||||
|
@ -386,7 +389,10 @@ The JSON response looks like this:
|
||||||
"created": 123,
|
"created": 123,
|
||||||
"batches": 1,
|
"batches": 1,
|
||||||
"version_conflicts": 2,
|
"version_conflicts": 2,
|
||||||
"retries": 0,
|
"retries": {
|
||||||
|
"bulk": 0,
|
||||||
|
"search": 0
|
||||||
|
}
|
||||||
"throttled_millis": 0,
|
"throttled_millis": 0,
|
||||||
"failures" : [ ]
|
"failures" : [ ]
|
||||||
}
|
}
|
||||||
|
@ -414,7 +420,8 @@ The number of version conflicts that reindex hit.
|
||||||
|
|
||||||
`retries`::
|
`retries`::
|
||||||
|
|
||||||
The number of retries that the reindex did in response to a full queue.
|
The number of retries attempted by reindex. `bulk` is the number of bulk
|
||||||
|
actions retried and `search` is the number of search actions retried.
|
||||||
|
|
||||||
`throttled_millis`::
|
`throttled_millis`::
|
||||||
|
|
||||||
|
@ -468,7 +475,10 @@ The responses looks like:
|
||||||
"batches" : 4,
|
"batches" : 4,
|
||||||
"version_conflicts" : 0,
|
"version_conflicts" : 0,
|
||||||
"noops" : 0,
|
"noops" : 0,
|
||||||
"retries": 0,
|
"retries": {
|
||||||
|
"bulk": 0,
|
||||||
|
"search": 0
|
||||||
|
},
|
||||||
"throttled_millis": 0
|
"throttled_millis": 0
|
||||||
},
|
},
|
||||||
"description" : ""
|
"description" : ""
|
||||||
|
|
|
@ -26,7 +26,10 @@ That will return something like this:
|
||||||
"batches": 1,
|
"batches": 1,
|
||||||
"version_conflicts": 0,
|
"version_conflicts": 0,
|
||||||
"noops": 0,
|
"noops": 0,
|
||||||
"retries": 0,
|
"retries": {
|
||||||
|
"bulk": 0,
|
||||||
|
"search": 0
|
||||||
|
},
|
||||||
"throttled_millis": 0,
|
"throttled_millis": 0,
|
||||||
"requests_per_second": "unlimited",
|
"requests_per_second": "unlimited",
|
||||||
"throttled_until_millis": 0,
|
"throttled_until_millis": 0,
|
||||||
|
@ -220,7 +223,10 @@ The JSON response looks like this:
|
||||||
"updated": 0,
|
"updated": 0,
|
||||||
"batches": 1,
|
"batches": 1,
|
||||||
"version_conflicts": 2,
|
"version_conflicts": 2,
|
||||||
"retries": 0,
|
"retries": {
|
||||||
|
"bulk": 0,
|
||||||
|
"search": 0
|
||||||
|
}
|
||||||
"throttled_millis": 0,
|
"throttled_millis": 0,
|
||||||
"failures" : [ ]
|
"failures" : [ ]
|
||||||
}
|
}
|
||||||
|
@ -244,7 +250,8 @@ The number of version conflicts that the update by query hit.
|
||||||
|
|
||||||
`retries`::
|
`retries`::
|
||||||
|
|
||||||
The number of retries that the update by query did in response to a full queue.
|
The number of retries attempted by update-by-query. `bulk` is the number of bulk
|
||||||
|
actions retried and `search` is the number of search actions retried.
|
||||||
|
|
||||||
`throttled_millis`::
|
`throttled_millis`::
|
||||||
|
|
||||||
|
@ -299,7 +306,10 @@ The responses looks like:
|
||||||
"batches" : 4,
|
"batches" : 4,
|
||||||
"version_conflicts" : 0,
|
"version_conflicts" : 0,
|
||||||
"noops" : 0,
|
"noops" : 0,
|
||||||
"retries": 0,
|
"retries": {
|
||||||
|
"bulk": 0,
|
||||||
|
"search": 0
|
||||||
|
}
|
||||||
"throttled_millis": 0
|
"throttled_millis": 0
|
||||||
},
|
},
|
||||||
"description" : ""
|
"description" : ""
|
||||||
|
|
|
@ -1282,6 +1282,31 @@ Splits a field into an array using a separator character. Only works on string f
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
<1> Treat all consecutive whitespace characters as a single separator
|
<1> Treat all consecutive whitespace characters as a single separator
|
||||||
|
|
||||||
|
[[sort-processor]]
|
||||||
|
=== Sort Processor
|
||||||
|
Sorts the elements of an array ascending or descending. Homogeneous arrays of numbers will be sorted
|
||||||
|
numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically.
|
||||||
|
Throws an error when the field is not an array.
|
||||||
|
|
||||||
|
[[sort-options]]
|
||||||
|
.Sort Options
|
||||||
|
[options="header"]
|
||||||
|
|======
|
||||||
|
| Name | Required | Default | Description
|
||||||
|
| `field` | yes | - | The field to be sorted
|
||||||
|
| `order` | no | `"asc"` | The sort order to use. Accepts `"asc"` or `"desc"`.
|
||||||
|
|======
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
--------------------------------------------------
|
||||||
|
{
|
||||||
|
"sort": {
|
||||||
|
"field": "field_to_sort",
|
||||||
|
"order": "desc"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
--------------------------------------------------
|
||||||
|
|
||||||
[[trim-processor]]
|
[[trim-processor]]
|
||||||
=== Trim Processor
|
=== Trim Processor
|
||||||
Trims whitespace from field.
|
Trims whitespace from field.
|
||||||
|
|
|
@ -34,6 +34,7 @@ way to do this is to upgrade to Elasticsearch 2.3 or later and to use the
|
||||||
* <<breaking_50_percolator>>
|
* <<breaking_50_percolator>>
|
||||||
* <<breaking_50_suggester>>
|
* <<breaking_50_suggester>>
|
||||||
* <<breaking_50_index_apis>>
|
* <<breaking_50_index_apis>>
|
||||||
|
* <<breaking_50_document_api_changes>>
|
||||||
* <<breaking_50_settings_changes>>
|
* <<breaking_50_settings_changes>>
|
||||||
* <<breaking_50_allocation>>
|
* <<breaking_50_allocation>>
|
||||||
* <<breaking_50_http_changes>>
|
* <<breaking_50_http_changes>>
|
||||||
|
|
|
@ -0,0 +1,33 @@
|
||||||
|
[[breaking_50_document_api_changes]]
|
||||||
|
=== Document API changes
|
||||||
|
|
||||||
|
==== Reindex and Update By Query
|
||||||
|
Before 5.0.0 `_reindex` and `_update_by_query` only retried bulk failures so
|
||||||
|
they used the following response format:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
----------------------
|
||||||
|
{
|
||||||
|
...
|
||||||
|
"retries": 10
|
||||||
|
...
|
||||||
|
}
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
Where `retries` counts the number of bulk retries. Now they retry on search
|
||||||
|
failures as well and use this response format:
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
----------------------
|
||||||
|
{
|
||||||
|
...
|
||||||
|
"retries": {
|
||||||
|
"bulk": 10,
|
||||||
|
"search": 1
|
||||||
|
}
|
||||||
|
...
|
||||||
|
}
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
Where `bulk` counts the number of bulk retries and `search` counts the number
|
||||||
|
of search retries.
|
|
@ -52,7 +52,7 @@ with a timeout defaulting at 20 times the ping timeout.
|
||||||
|
|
||||||
When the master node stops or has encountered a problem, the cluster nodes
|
When the master node stops or has encountered a problem, the cluster nodes
|
||||||
start pinging again and will elect a new master. This pinging round also
|
start pinging again and will elect a new master. This pinging round also
|
||||||
serves as a protection against (partial) network failures where node may unjustly
|
serves as a protection against (partial) network failures where a node may unjustly
|
||||||
think that the master has failed. In this case the node will simply hear from
|
think that the master has failed. In this case the node will simply hear from
|
||||||
other nodes about the currently active master.
|
other nodes about the currently active master.
|
||||||
|
|
||||||
|
|
|
@ -189,7 +189,7 @@ POST hockey/player/1/_update
|
||||||
{
|
{
|
||||||
"script": {
|
"script": {
|
||||||
"lang": "painless",
|
"lang": "painless",
|
||||||
"inline": "ctx._source.last = params.last ctx._source.nick = params.nick",
|
"inline": "ctx._source.last = params.last; ctx._source.nick = params.nick",
|
||||||
"params": {
|
"params": {
|
||||||
"last": "gaudreau",
|
"last": "gaudreau",
|
||||||
"nick": "hockey"
|
"nick": "hockey"
|
||||||
|
|
|
@ -22,6 +22,7 @@
|
||||||
- match: { nodes.$master.ingest.processors.10.type: remove }
|
- match: { nodes.$master.ingest.processors.10.type: remove }
|
||||||
- match: { nodes.$master.ingest.processors.11.type: rename }
|
- match: { nodes.$master.ingest.processors.11.type: rename }
|
||||||
- match: { nodes.$master.ingest.processors.12.type: set }
|
- match: { nodes.$master.ingest.processors.12.type: set }
|
||||||
- match: { nodes.$master.ingest.processors.13.type: split }
|
- match: { nodes.$master.ingest.processors.13.type: sort }
|
||||||
- match: { nodes.$master.ingest.processors.14.type: trim }
|
- match: { nodes.$master.ingest.processors.14.type: split }
|
||||||
- match: { nodes.$master.ingest.processors.15.type: uppercase }
|
- match: { nodes.$master.ingest.processors.15.type: trim }
|
||||||
|
- match: { nodes.$master.ingest.processors.16.type: uppercase }
|
||||||
|
|
|
@ -28,15 +28,15 @@ source
|
||||||
statement
|
statement
|
||||||
: IF LP expression RP block ( ELSE block )? # if
|
: IF LP expression RP block ( ELSE block )? # if
|
||||||
| WHILE LP expression RP ( block | empty ) # while
|
| WHILE LP expression RP ( block | empty ) # while
|
||||||
| DO block WHILE LP expression RP SEMICOLON? # do
|
| DO block WHILE LP expression RP ( SEMICOLON | EOF ) # do
|
||||||
| FOR LP initializer? SEMICOLON expression? SEMICOLON afterthought? RP ( block | empty ) # for
|
| FOR LP initializer? SEMICOLON expression? SEMICOLON afterthought? RP ( block | empty ) # for
|
||||||
| declaration SEMICOLON? # decl
|
| declaration ( SEMICOLON | EOF ) # decl
|
||||||
| CONTINUE SEMICOLON? # continue
|
| CONTINUE ( SEMICOLON | EOF ) # continue
|
||||||
| BREAK SEMICOLON? # break
|
| BREAK ( SEMICOLON | EOF ) # break
|
||||||
| RETURN expression SEMICOLON? # return
|
| RETURN expression ( SEMICOLON | EOF ) # return
|
||||||
| TRY block trap+ # try
|
| TRY block trap+ # try
|
||||||
| THROW expression SEMICOLON? # throw
|
| THROW expression ( SEMICOLON | EOF ) # throw
|
||||||
| expression SEMICOLON? # expr
|
| expression ( SEMICOLON | EOF ) # expr
|
||||||
;
|
;
|
||||||
|
|
||||||
block
|
block
|
||||||
|
|
|
@ -133,37 +133,41 @@ public final class Def {
|
||||||
* <p>
|
* <p>
|
||||||
* @param receiverClass Class of the object to invoke the method on.
|
* @param receiverClass Class of the object to invoke the method on.
|
||||||
* @param name Name of the method.
|
* @param name Name of the method.
|
||||||
|
* @param type Callsite signature. Need not match exactly, except the number of parameters.
|
||||||
* @param definition Whitelist to check.
|
* @param definition Whitelist to check.
|
||||||
* @return pointer to matching method to invoke. never returns null.
|
* @return pointer to matching method to invoke. never returns null.
|
||||||
* @throws IllegalArgumentException if no matching whitelisted method was found.
|
* @throws IllegalArgumentException if no matching whitelisted method was found.
|
||||||
*/
|
*/
|
||||||
static MethodHandle lookupMethod(Class<?> receiverClass, String name, Definition definition) {
|
static MethodHandle lookupMethod(Class<?> receiverClass, String name, MethodType type, Definition definition) {
|
||||||
// check whitelist for matching method
|
// we don't consider receiver an argument/counting towards arity
|
||||||
for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
|
type = type.dropParameterTypes(0, 1);
|
||||||
RuntimeClass struct = definition.runtimeMap.get(clazz);
|
Definition.MethodKey key = new Definition.MethodKey(name, type.parameterCount());
|
||||||
|
// check whitelist for matching method
|
||||||
if (struct != null) {
|
for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
|
||||||
Method method = struct.methods.get(name);
|
RuntimeClass struct = definition.runtimeMap.get(clazz);
|
||||||
if (method != null) {
|
|
||||||
return method.handle;
|
if (struct != null) {
|
||||||
}
|
Method method = struct.methods.get(key);
|
||||||
}
|
if (method != null) {
|
||||||
|
return method.handle;
|
||||||
for (final Class<?> iface : clazz.getInterfaces()) {
|
}
|
||||||
struct = definition.runtimeMap.get(iface);
|
}
|
||||||
|
|
||||||
if (struct != null) {
|
for (final Class<?> iface : clazz.getInterfaces()) {
|
||||||
Method method = struct.methods.get(name);
|
struct = definition.runtimeMap.get(iface);
|
||||||
if (method != null) {
|
|
||||||
return method.handle;
|
if (struct != null) {
|
||||||
}
|
Method method = struct.methods.get(key);
|
||||||
}
|
if (method != null) {
|
||||||
}
|
return method.handle;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
// no matching methods in whitelist found
|
}
|
||||||
throw new IllegalArgumentException("Unable to find dynamic method [" + name + "] " +
|
}
|
||||||
"for class [" + receiverClass.getCanonicalName() + "].");
|
|
||||||
|
// no matching methods in whitelist found
|
||||||
|
throw new IllegalArgumentException("Unable to find dynamic method [" + name + "] with signature [" + type + "] " +
|
||||||
|
"for class [" + receiverClass.getCanonicalName() + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -91,10 +91,10 @@ public final class DefBootstrap {
|
||||||
/**
|
/**
|
||||||
* Does a slow lookup against the whitelist.
|
* Does a slow lookup against the whitelist.
|
||||||
*/
|
*/
|
||||||
private static MethodHandle lookup(int flavor, Class<?> clazz, String name) {
|
private static MethodHandle lookup(int flavor, Class<?> clazz, String name, MethodType type) {
|
||||||
switch(flavor) {
|
switch(flavor) {
|
||||||
case METHOD_CALL:
|
case METHOD_CALL:
|
||||||
return Def.lookupMethod(clazz, name, Definition.INSTANCE);
|
return Def.lookupMethod(clazz, name, type, Definition.INSTANCE);
|
||||||
case LOAD:
|
case LOAD:
|
||||||
return Def.lookupGetter(clazz, name, Definition.INSTANCE);
|
return Def.lookupGetter(clazz, name, Definition.INSTANCE);
|
||||||
case STORE:
|
case STORE:
|
||||||
|
@ -115,7 +115,7 @@ public final class DefBootstrap {
|
||||||
final MethodType type = type();
|
final MethodType type = type();
|
||||||
final Object receiver = args[0];
|
final Object receiver = args[0];
|
||||||
final Class<?> receiverClass = receiver.getClass();
|
final Class<?> receiverClass = receiver.getClass();
|
||||||
final MethodHandle target = lookup(flavor, receiverClass, name).asType(type);
|
final MethodHandle target = lookup(flavor, receiverClass, name, type).asType(type);
|
||||||
|
|
||||||
if (depth >= MAX_DEPTH) {
|
if (depth >= MAX_DEPTH) {
|
||||||
// revert to a vtable call
|
// revert to a vtable call
|
||||||
|
|
|
@ -33,6 +33,7 @@ import java.util.HashSet;
|
||||||
import java.util.Iterator;
|
import java.util.Iterator;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Objects;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -194,17 +195,72 @@ public final class Definition {
|
||||||
this.setter = setter;
|
this.setter = setter;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: instead of hashing on this, we could have a 'next' pointer in Method itself, but it would make code more complex
|
||||||
|
// please do *NOT* under any circumstances change this to be the crappy Tuple from elasticsearch!
|
||||||
|
/**
|
||||||
|
* Key for looking up a method.
|
||||||
|
* <p>
|
||||||
|
* Methods are keyed on both name and arity, and can be overloaded once per arity.
|
||||||
|
* This allows signatures such as {@code String.indexOf(String) vs String.indexOf(String, int)}.
|
||||||
|
* <p>
|
||||||
|
* It is less flexible than full signature overloading where types can differ too, but
|
||||||
|
* better than just the name, and overloading types adds complexity to users, too.
|
||||||
|
*/
|
||||||
|
public static final class MethodKey {
|
||||||
|
public final String name;
|
||||||
|
public final int arity;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a new lookup key
|
||||||
|
* @param name name of the method
|
||||||
|
* @param arity number of parameters
|
||||||
|
*/
|
||||||
|
public MethodKey(String name, int arity) {
|
||||||
|
this.name = Objects.requireNonNull(name);
|
||||||
|
this.arity = arity;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int hashCode() {
|
||||||
|
final int prime = 31;
|
||||||
|
int result = 1;
|
||||||
|
result = prime * result + arity;
|
||||||
|
result = prime * result + name.hashCode();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean equals(Object obj) {
|
||||||
|
if (this == obj) return true;
|
||||||
|
if (obj == null) return false;
|
||||||
|
if (getClass() != obj.getClass()) return false;
|
||||||
|
MethodKey other = (MethodKey) obj;
|
||||||
|
if (arity != other.arity) return false;
|
||||||
|
if (!name.equals(other.name)) return false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder sb = new StringBuilder();
|
||||||
|
sb.append(name);
|
||||||
|
sb.append('/');
|
||||||
|
sb.append(arity);
|
||||||
|
return sb.toString();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public static final class Struct {
|
public static final class Struct {
|
||||||
public final String name;
|
public final String name;
|
||||||
public final Class<?> clazz;
|
public final Class<?> clazz;
|
||||||
public final org.objectweb.asm.Type type;
|
public final org.objectweb.asm.Type type;
|
||||||
|
|
||||||
public final Map<String, Constructor> constructors;
|
public final Map<MethodKey, Constructor> constructors;
|
||||||
public final Map<String, Method> functions;
|
public final Map<MethodKey, Method> staticMethods;
|
||||||
public final Map<String, Method> methods;
|
public final Map<MethodKey, Method> methods;
|
||||||
|
|
||||||
public final Map<String, Field> statics;
|
public final Map<String, Field> staticMembers;
|
||||||
public final Map<String, Field> members;
|
public final Map<String, Field> members;
|
||||||
|
|
||||||
private Struct(final String name, final Class<?> clazz, final org.objectweb.asm.Type type) {
|
private Struct(final String name, final Class<?> clazz, final org.objectweb.asm.Type type) {
|
||||||
|
@ -213,10 +269,10 @@ public final class Definition {
|
||||||
this.type = type;
|
this.type = type;
|
||||||
|
|
||||||
constructors = new HashMap<>();
|
constructors = new HashMap<>();
|
||||||
functions = new HashMap<>();
|
staticMethods = new HashMap<>();
|
||||||
methods = new HashMap<>();
|
methods = new HashMap<>();
|
||||||
|
|
||||||
statics = new HashMap<>();
|
staticMembers = new HashMap<>();
|
||||||
members = new HashMap<>();
|
members = new HashMap<>();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -226,10 +282,10 @@ public final class Definition {
|
||||||
type = struct.type;
|
type = struct.type;
|
||||||
|
|
||||||
constructors = Collections.unmodifiableMap(struct.constructors);
|
constructors = Collections.unmodifiableMap(struct.constructors);
|
||||||
functions = Collections.unmodifiableMap(struct.functions);
|
staticMethods = Collections.unmodifiableMap(struct.staticMethods);
|
||||||
methods = Collections.unmodifiableMap(struct.methods);
|
methods = Collections.unmodifiableMap(struct.methods);
|
||||||
|
|
||||||
statics = Collections.unmodifiableMap(struct.statics);
|
staticMembers = Collections.unmodifiableMap(struct.staticMembers);
|
||||||
members = Collections.unmodifiableMap(struct.members);
|
members = Collections.unmodifiableMap(struct.members);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -304,11 +360,11 @@ public final class Definition {
|
||||||
}
|
}
|
||||||
|
|
||||||
public static final class RuntimeClass {
|
public static final class RuntimeClass {
|
||||||
public final Map<String, Method> methods;
|
public final Map<MethodKey, Method> methods;
|
||||||
public final Map<String, MethodHandle> getters;
|
public final Map<String, MethodHandle> getters;
|
||||||
public final Map<String, MethodHandle> setters;
|
public final Map<String, MethodHandle> setters;
|
||||||
|
|
||||||
private RuntimeClass(final Map<String, Method> methods,
|
private RuntimeClass(final Map<MethodKey, Method> methods,
|
||||||
final Map<String, MethodHandle> getters, final Map<String, MethodHandle> setters) {
|
final Map<String, MethodHandle> getters, final Map<String, MethodHandle> setters) {
|
||||||
this.methods = methods;
|
this.methods = methods;
|
||||||
this.getters = getters;
|
this.getters = getters;
|
||||||
|
@ -395,6 +451,9 @@ public final class Definition {
|
||||||
public final Type longsType;
|
public final Type longsType;
|
||||||
public final Type doublesType;
|
public final Type doublesType;
|
||||||
public final Type geoPointsType;
|
public final Type geoPointsType;
|
||||||
|
|
||||||
|
// for testing features not currently "used" by the whitelist (we should not rush the API for that!)
|
||||||
|
public final Type featureTestType;
|
||||||
|
|
||||||
private Definition() {
|
private Definition() {
|
||||||
structsMap = new HashMap<>();
|
structsMap = new HashMap<>();
|
||||||
|
@ -476,6 +535,8 @@ public final class Definition {
|
||||||
longsType = getType("Longs");
|
longsType = getType("Longs");
|
||||||
doublesType = getType("Doubles");
|
doublesType = getType("Doubles");
|
||||||
geoPointsType = getType("GeoPoints");
|
geoPointsType = getType("GeoPoints");
|
||||||
|
|
||||||
|
featureTestType = getType("FeatureTest");
|
||||||
|
|
||||||
addElements();
|
addElements();
|
||||||
copyStructs();
|
copyStructs();
|
||||||
|
@ -567,6 +628,8 @@ public final class Definition {
|
||||||
this.longsType = definition.longsType;
|
this.longsType = definition.longsType;
|
||||||
this.doublesType = definition.doublesType;
|
this.doublesType = definition.doublesType;
|
||||||
this.geoPointsType = definition.geoPointsType;
|
this.geoPointsType = definition.geoPointsType;
|
||||||
|
|
||||||
|
this.featureTestType = definition.featureTestType;
|
||||||
}
|
}
|
||||||
|
|
||||||
private void addStructs() {
|
private void addStructs() {
|
||||||
|
@ -643,6 +706,8 @@ public final class Definition {
|
||||||
addStruct( "Longs" , ScriptDocValues.Longs.class);
|
addStruct( "Longs" , ScriptDocValues.Longs.class);
|
||||||
addStruct( "Doubles" , ScriptDocValues.Doubles.class);
|
addStruct( "Doubles" , ScriptDocValues.Doubles.class);
|
||||||
addStruct( "GeoPoints" , ScriptDocValues.GeoPoints.class);
|
addStruct( "GeoPoints" , ScriptDocValues.GeoPoints.class);
|
||||||
|
|
||||||
|
addStruct( "FeatureTest", FeatureTest.class);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void addElements() {
|
private void addElements() {
|
||||||
|
@ -765,6 +830,7 @@ public final class Definition {
|
||||||
addMethod("String", "compareTo", null, false, intType, new Type[] {stringType}, null, null);
|
addMethod("String", "compareTo", null, false, intType, new Type[] {stringType}, null, null);
|
||||||
addMethod("String", "concat", null, false, stringType, new Type[] {stringType}, null, null);
|
addMethod("String", "concat", null, false, stringType, new Type[] {stringType}, null, null);
|
||||||
addMethod("String", "endsWith", null, false, booleanType, new Type[] {stringType}, null, null);
|
addMethod("String", "endsWith", null, false, booleanType, new Type[] {stringType}, null, null);
|
||||||
|
addMethod("String", "indexOf", null, false, intType, new Type[] {stringType}, null, null);
|
||||||
addMethod("String", "indexOf", null, false, intType, new Type[] {stringType, intType}, null, null);
|
addMethod("String", "indexOf", null, false, intType, new Type[] {stringType, intType}, null, null);
|
||||||
addMethod("String", "isEmpty", null, false, booleanType, new Type[] {}, null, null);
|
addMethod("String", "isEmpty", null, false, booleanType, new Type[] {}, null, null);
|
||||||
addMethod("String", "replace", null, false, stringType, new Type[] {charseqType, charseqType}, null, null);
|
addMethod("String", "replace", null, false, stringType, new Type[] {charseqType, charseqType}, null, null);
|
||||||
|
@ -1112,6 +1178,16 @@ public final class Definition {
|
||||||
new Type[] { stringType }, null, null);
|
new Type[] { stringType }, null, null);
|
||||||
addMethod("GeoPoints", "geohashDistanceInMiles", null, false, doubleType,
|
addMethod("GeoPoints", "geohashDistanceInMiles", null, false, doubleType,
|
||||||
new Type[] { stringType }, null, null);
|
new Type[] { stringType }, null, null);
|
||||||
|
|
||||||
|
// currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods
|
||||||
|
addConstructor("FeatureTest", "new", new Type[] {}, null);
|
||||||
|
addConstructor("FeatureTest", "new", new Type[] {intType, intType}, null);
|
||||||
|
addMethod("FeatureTest", "getX", null, false, intType, new Type[] {}, null, null);
|
||||||
|
addMethod("FeatureTest", "getY", null, false, intType, new Type[] {}, null, null);
|
||||||
|
addMethod("FeatureTest", "setX", null, false, voidType, new Type[] {intType}, null, null);
|
||||||
|
addMethod("FeatureTest", "setY", null, false, voidType, new Type[] {intType}, null, null);
|
||||||
|
addMethod("FeatureTest", "overloadedStatic", null, true, booleanType, new Type[] {}, null, null);
|
||||||
|
addMethod("FeatureTest", "overloadedStatic", null, true, booleanType, new Type[] {booleanType}, null, null);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void copyStructs() {
|
private void copyStructs() {
|
||||||
|
@ -1165,6 +1241,8 @@ public final class Definition {
|
||||||
copyStruct("Longs", "List", "Collection", "Object");
|
copyStruct("Longs", "List", "Collection", "Object");
|
||||||
copyStruct("Doubles", "List", "Collection", "Object");
|
copyStruct("Doubles", "List", "Collection", "Object");
|
||||||
copyStruct("GeoPoints", "List", "Collection", "Object");
|
copyStruct("GeoPoints", "List", "Collection", "Object");
|
||||||
|
|
||||||
|
copyStruct("FeatureTest", "Object");
|
||||||
}
|
}
|
||||||
|
|
||||||
private void addTransforms() {
|
private void addTransforms() {
|
||||||
|
@ -1482,6 +1560,8 @@ public final class Definition {
|
||||||
addRuntimeClass(longsType.struct);
|
addRuntimeClass(longsType.struct);
|
||||||
addRuntimeClass(doublesType.struct);
|
addRuntimeClass(doublesType.struct);
|
||||||
addRuntimeClass(geoPointsType.struct);
|
addRuntimeClass(geoPointsType.struct);
|
||||||
|
|
||||||
|
addRuntimeClass(featureTestType.struct);
|
||||||
}
|
}
|
||||||
|
|
||||||
private final void addStruct(final String name, final Class<?> clazz) {
|
private final void addStruct(final String name, final Class<?> clazz) {
|
||||||
|
@ -1510,20 +1590,22 @@ public final class Definition {
|
||||||
throw new IllegalArgumentException(
|
throw new IllegalArgumentException(
|
||||||
"Invalid constructor name [" + name + "] with the struct [" + owner.name + "].");
|
"Invalid constructor name [" + name + "] with the struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
MethodKey methodKey = new MethodKey(name, args.length);
|
||||||
|
|
||||||
if (owner.constructors.containsKey(name)) {
|
if (owner.constructors.containsKey(methodKey)) {
|
||||||
throw new IllegalArgumentException(
|
throw new IllegalArgumentException(
|
||||||
"Duplicate constructor name [" + name + "] found within the struct [" + owner.name + "].");
|
"Duplicate constructor [" + methodKey + "] found within the struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (owner.statics.containsKey(name)) {
|
if (owner.staticMethods.containsKey(methodKey)) {
|
||||||
throw new IllegalArgumentException("Constructors and functions may not have the same name" +
|
throw new IllegalArgumentException("Constructors and static methods may not have the same signature" +
|
||||||
" [" + name + "] within the same struct [" + owner.name + "].");
|
" [" + methodKey + "] within the same struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (owner.methods.containsKey(name)) {
|
if (owner.methods.containsKey(methodKey)) {
|
||||||
throw new IllegalArgumentException("Constructors and methods may not have the same name" +
|
throw new IllegalArgumentException("Constructors and methods may not have the same signature" +
|
||||||
" [" + name + "] within the same struct [" + owner.name + "].");
|
" [" + methodKey + "] within the same struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
final Class<?>[] classes = new Class<?>[args.length];
|
final Class<?>[] classes = new Class<?>[args.length];
|
||||||
|
@ -1553,7 +1635,7 @@ public final class Definition {
|
||||||
final Constructor constructor =
|
final Constructor constructor =
|
||||||
new Constructor(name, owner, Arrays.asList(genargs != null ? genargs : args), asm, reflect);
|
new Constructor(name, owner, Arrays.asList(genargs != null ? genargs : args), asm, reflect);
|
||||||
|
|
||||||
owner.constructors.put(name, constructor);
|
owner.constructors.put(methodKey, constructor);
|
||||||
}
|
}
|
||||||
|
|
||||||
private final void addMethod(final String struct, final String name, final String alias, final boolean statik,
|
private final void addMethod(final String struct, final String name, final String alias, final boolean statik,
|
||||||
|
@ -1566,32 +1648,34 @@ public final class Definition {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!name.matches("^[_a-zA-Z][_a-zA-Z0-9]*$")) {
|
if (!name.matches("^[_a-zA-Z][_a-zA-Z0-9]*$")) {
|
||||||
throw new IllegalArgumentException("Invalid " + (statik ? "function" : "method") +
|
throw new IllegalArgumentException("Invalid " + (statik ? "static method" : "method") +
|
||||||
" name [" + name + "] with the struct [" + owner.name + "].");
|
" name [" + name + "] with the struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
MethodKey methodKey = new MethodKey(name, args.length);
|
||||||
|
|
||||||
if (owner.constructors.containsKey(name)) {
|
if (owner.constructors.containsKey(methodKey)) {
|
||||||
throw new IllegalArgumentException("Constructors and " + (statik ? "functions" : "methods") +
|
throw new IllegalArgumentException("Constructors and " + (statik ? "static methods" : "methods") +
|
||||||
" may not have the same name [" + name + "] within the same struct" +
|
" may not have the same signature [" + methodKey + "] within the same struct" +
|
||||||
" [" + owner.name + "].");
|
" [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (owner.statics.containsKey(name)) {
|
if (owner.staticMethods.containsKey(methodKey)) {
|
||||||
if (statik) {
|
if (statik) {
|
||||||
throw new IllegalArgumentException(
|
throw new IllegalArgumentException(
|
||||||
"Duplicate function name [" + name + "] found within the struct [" + owner.name + "].");
|
"Duplicate static method signature [" + methodKey + "] found within the struct [" + owner.name + "].");
|
||||||
} else {
|
} else {
|
||||||
throw new IllegalArgumentException("Functions and methods may not have the same name" +
|
throw new IllegalArgumentException("Static methods and methods may not have the same signature" +
|
||||||
" [" + name + "] within the same struct [" + owner.name + "].");
|
" [" + methodKey + "] within the same struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (owner.methods.containsKey(name)) {
|
if (owner.methods.containsKey(methodKey)) {
|
||||||
if (statik) {
|
if (statik) {
|
||||||
throw new IllegalArgumentException("Functions and methods may not have the same name" +
|
throw new IllegalArgumentException("Static methods and methods may not have the same signature" +
|
||||||
" [" + name + "] within the same struct [" + owner.name + "].");
|
" [" + methodKey + "] within the same struct [" + owner.name + "].");
|
||||||
} else {
|
} else {
|
||||||
throw new IllegalArgumentException("Duplicate method name [" + name + "]" +
|
throw new IllegalArgumentException("Duplicate method signature [" + methodKey + "]" +
|
||||||
" found within the struct [" + owner.name + "].");
|
" found within the struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1663,14 +1747,14 @@ public final class Definition {
|
||||||
" within the struct [" + owner.name + "] is not linked to a static Java method.");
|
" within the struct [" + owner.name + "] is not linked to a static Java method.");
|
||||||
}
|
}
|
||||||
|
|
||||||
owner.functions.put(name, method);
|
owner.staticMethods.put(methodKey, method);
|
||||||
} else {
|
} else {
|
||||||
if (java.lang.reflect.Modifier.isStatic(modifiers)) {
|
if (java.lang.reflect.Modifier.isStatic(modifiers)) {
|
||||||
throw new IllegalArgumentException("Method [" + name + "]" +
|
throw new IllegalArgumentException("Method [" + name + "]" +
|
||||||
" within the struct [" + owner.name + "] is not linked to a non-static Java method.");
|
" within the struct [" + owner.name + "] is not linked to a non-static Java method.");
|
||||||
}
|
}
|
||||||
|
|
||||||
owner.methods.put(name, method);
|
owner.methods.put(methodKey, method);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1688,7 +1772,7 @@ public final class Definition {
|
||||||
" name [" + name + "] with the struct [" + owner.name + "].");
|
" name [" + name + "] with the struct [" + owner.name + "].");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (owner.statics.containsKey(name)) {
|
if (owner.staticMembers.containsKey(name)) {
|
||||||
if (statik) {
|
if (statik) {
|
||||||
throw new IllegalArgumentException("Duplicate static name [" + name + "]" +
|
throw new IllegalArgumentException("Duplicate static name [" + name + "]" +
|
||||||
" found within the struct [" + owner.name + "].");
|
" found within the struct [" + owner.name + "].");
|
||||||
|
@ -1751,7 +1835,7 @@ public final class Definition {
|
||||||
" within the struct [" + owner.name + "] is not linked to static Java field.");
|
" within the struct [" + owner.name + "] is not linked to static Java field.");
|
||||||
}
|
}
|
||||||
|
|
||||||
owner.statics.put(alias == null ? name : alias, field);
|
owner.staticMembers.put(alias == null ? name : alias, field);
|
||||||
} else {
|
} else {
|
||||||
if (java.lang.reflect.Modifier.isStatic(modifiers)) {
|
if (java.lang.reflect.Modifier.isStatic(modifiers)) {
|
||||||
throw new IllegalArgumentException("Member [" + name + "]" +
|
throw new IllegalArgumentException("Member [" + name + "]" +
|
||||||
|
@ -1785,8 +1869,10 @@ public final class Definition {
|
||||||
final boolean object = child.clazz.equals(Object.class) &&
|
final boolean object = child.clazz.equals(Object.class) &&
|
||||||
java.lang.reflect.Modifier.isInterface(owner.clazz.getModifiers());
|
java.lang.reflect.Modifier.isInterface(owner.clazz.getModifiers());
|
||||||
|
|
||||||
for (final Method method : child.methods.values()) {
|
for (Map.Entry<MethodKey,Method> kvPair : child.methods.entrySet()) {
|
||||||
if (owner.methods.get(method.name) == null) {
|
MethodKey methodKey = kvPair.getKey();
|
||||||
|
Method method = kvPair.getValue();
|
||||||
|
if (owner.methods.get(methodKey) == null) {
|
||||||
final Class<?> clazz = object ? Object.class : owner.clazz;
|
final Class<?> clazz = object ? Object.class : owner.clazz;
|
||||||
|
|
||||||
java.lang.reflect.Method reflect;
|
java.lang.reflect.Method reflect;
|
||||||
|
@ -1808,7 +1894,7 @@ public final class Definition {
|
||||||
Arrays.toString(method.reflect.getParameterTypes()) + ".");
|
Arrays.toString(method.reflect.getParameterTypes()) + ".");
|
||||||
}
|
}
|
||||||
|
|
||||||
owner.methods.put(method.name,
|
owner.methods.put(methodKey,
|
||||||
new Method(method.name, owner, method.rtn, method.arguments, method.method, reflect, handle));
|
new Method(method.name, owner, method.rtn, method.arguments, method.method, reflect, handle));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1866,8 +1952,11 @@ public final class Definition {
|
||||||
Type upcast = null;
|
Type upcast = null;
|
||||||
Type downcast = null;
|
Type downcast = null;
|
||||||
|
|
||||||
|
// transforms are implicitly arity of 0, unless a static method where its 1 (receiver passed)
|
||||||
|
MethodKey methodKey = new MethodKey(name, statik ? 1 : 0);
|
||||||
|
|
||||||
if (statik) {
|
if (statik) {
|
||||||
method = owner.functions.get(name);
|
method = owner.staticMethods.get(methodKey);
|
||||||
|
|
||||||
if (method == null) {
|
if (method == null) {
|
||||||
throw new IllegalArgumentException("Transform with owner struct [" + owner.name + "]" +
|
throw new IllegalArgumentException("Transform with owner struct [" + owner.name + "]" +
|
||||||
|
@ -1905,7 +1994,7 @@ public final class Definition {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
method = owner.methods.get(name);
|
method = owner.methods.get(methodKey);
|
||||||
|
|
||||||
if (method == null) {
|
if (method == null) {
|
||||||
throw new IllegalArgumentException("Transform with owner struct [" + owner.name + "]" +
|
throw new IllegalArgumentException("Transform with owner struct [" + owner.name + "]" +
|
||||||
|
@ -1950,7 +2039,7 @@ public final class Definition {
|
||||||
* Precomputes a more efficient structure for dynamic method/field access.
|
* Precomputes a more efficient structure for dynamic method/field access.
|
||||||
*/
|
*/
|
||||||
private void addRuntimeClass(final Struct struct) {
|
private void addRuntimeClass(final Struct struct) {
|
||||||
final Map<String, Method> methods = struct.methods;
|
final Map<MethodKey, Method> methods = struct.methods;
|
||||||
final Map<String, MethodHandle> getters = new HashMap<>();
|
final Map<String, MethodHandle> getters = new HashMap<>();
|
||||||
final Map<String, MethodHandle> setters = new HashMap<>();
|
final Map<String, MethodHandle> setters = new HashMap<>();
|
||||||
|
|
||||||
|
@ -1961,8 +2050,8 @@ public final class Definition {
|
||||||
}
|
}
|
||||||
|
|
||||||
// add all getters/setters
|
// add all getters/setters
|
||||||
for (final Map.Entry<String, Method> method : methods.entrySet()) {
|
for (final Map.Entry<MethodKey, Method> method : methods.entrySet()) {
|
||||||
final String name = method.getKey();
|
final String name = method.getKey().name;
|
||||||
final Method m = method.getValue();
|
final Method m = method.getValue();
|
||||||
|
|
||||||
if (m.arguments.size() == 0 &&
|
if (m.arguments.size() == 0 &&
|
||||||
|
|
|
@ -0,0 +1,66 @@
|
||||||
|
package org.elasticsearch.painless;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/** Currently just a dummy class for testing a few features not yet exposed by whitelist! */
|
||||||
|
public class FeatureTest {
|
||||||
|
private int x;
|
||||||
|
private int y;
|
||||||
|
|
||||||
|
/** empty ctor */
|
||||||
|
public FeatureTest() {
|
||||||
|
}
|
||||||
|
|
||||||
|
/** ctor with params */
|
||||||
|
public FeatureTest(int x, int y) {
|
||||||
|
this.x = x;
|
||||||
|
this.y = y;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** getter for x */
|
||||||
|
public int getX() {
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** setter for x */
|
||||||
|
public void setX(int x) {
|
||||||
|
this.x = x;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** getter for y */
|
||||||
|
public int getY() {
|
||||||
|
return y;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** setter for y */
|
||||||
|
public void setY(int y) {
|
||||||
|
this.y = y;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** static method that returns true */
|
||||||
|
public static boolean overloadedStatic() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** static method that returns what you ask it */
|
||||||
|
public static boolean overloadedStatic(boolean whatToReturn) {
|
||||||
|
return whatToReturn;
|
||||||
|
}
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -55,15 +55,11 @@ public final class LCall extends ALink {
|
||||||
throw new IllegalArgumentException(error("Cannot assign a value to a call [" + name + "]."));
|
throw new IllegalArgumentException(error("Cannot assign a value to a call [" + name + "]."));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Definition.MethodKey methodKey = new Definition.MethodKey(name, arguments.size());
|
||||||
final Struct struct = before.struct;
|
final Struct struct = before.struct;
|
||||||
method = statik ? struct.functions.get(name) : struct.methods.get(name);
|
method = statik ? struct.staticMethods.get(methodKey) : struct.methods.get(methodKey);
|
||||||
|
|
||||||
if (method != null) {
|
if (method != null) {
|
||||||
if (method.arguments.size() != arguments.size()) {
|
|
||||||
throw new IllegalArgumentException(error("When calling [" + name + "] on type [" + struct.name + "]" +
|
|
||||||
" expected [" + method.arguments.size() + "] arguments, but found [" + arguments.size() + "]."));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int argument = 0; argument < arguments.size(); ++argument) {
|
for (int argument = 0; argument < arguments.size(); ++argument) {
|
||||||
final AExpression expression = arguments.get(argument);
|
final AExpression expression = arguments.get(argument);
|
||||||
|
|
||||||
|
@ -83,7 +79,8 @@ public final class LCall extends ALink {
|
||||||
return link.analyze(settings, definition, variables);
|
return link.analyze(settings, definition, variables);
|
||||||
}
|
}
|
||||||
|
|
||||||
throw new IllegalArgumentException(error("Unknown call [" + name + "] on type [" + struct.name + "]."));
|
throw new IllegalArgumentException(error("Unknown call [" + name + "] with [" + arguments.size() +
|
||||||
|
"] arguments on type [" + struct.name + "]."));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
@ -60,7 +60,7 @@ public final class LField extends ALink {
|
||||||
}
|
}
|
||||||
|
|
||||||
final Struct struct = before.struct;
|
final Struct struct = before.struct;
|
||||||
field = statik ? struct.statics.get(value) : struct.members.get(value);
|
field = statik ? struct.staticMembers.get(value) : struct.members.get(value);
|
||||||
|
|
||||||
if (field != null) {
|
if (field != null) {
|
||||||
if (store && java.lang.reflect.Modifier.isFinal(field.reflect.getModifiers())) {
|
if (store && java.lang.reflect.Modifier.isFinal(field.reflect.getModifiers())) {
|
||||||
|
@ -72,9 +72,12 @@ public final class LField extends ALink {
|
||||||
|
|
||||||
return this;
|
return this;
|
||||||
} else {
|
} else {
|
||||||
|
// TODO: improve this: the isXXX case seems missing???
|
||||||
final boolean shortcut =
|
final boolean shortcut =
|
||||||
struct.methods.containsKey("get" + Character.toUpperCase(value.charAt(0)) + value.substring(1)) ||
|
struct.methods.containsKey(new Definition.MethodKey("get" +
|
||||||
struct.methods.containsKey("set" + Character.toUpperCase(value.charAt(0)) + value.substring(1));
|
Character.toUpperCase(value.charAt(0)) + value.substring(1), 0)) ||
|
||||||
|
struct.methods.containsKey(new Definition.MethodKey("set" +
|
||||||
|
Character.toUpperCase(value.charAt(0)) + value.substring(1), 1));
|
||||||
|
|
||||||
if (shortcut) {
|
if (shortcut) {
|
||||||
return new LShortcut(line, location, value).copy(this).analyze(settings, definition, variables);
|
return new LShortcut(line, location, value).copy(this).analyze(settings, definition, variables);
|
||||||
|
|
|
@ -43,8 +43,8 @@ final class LListShortcut extends ALink {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
ALink analyze(final CompilerSettings settings, final Definition definition, final Variables variables) {
|
ALink analyze(final CompilerSettings settings, final Definition definition, final Variables variables) {
|
||||||
getter = before.struct.methods.get("get");
|
getter = before.struct.methods.get(new Definition.MethodKey("get", 1));
|
||||||
setter = before.struct.methods.get("set");
|
setter = before.struct.methods.get(new Definition.MethodKey("set", 2));
|
||||||
|
|
||||||
if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 ||
|
if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 ||
|
||||||
getter.arguments.get(0).sort != Sort.INT)) {
|
getter.arguments.get(0).sort != Sort.INT)) {
|
||||||
|
|
|
@ -43,8 +43,8 @@ final class LMapShortcut extends ALink {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
ALink analyze(final CompilerSettings settings, final Definition definition, final Variables variables) {
|
ALink analyze(final CompilerSettings settings, final Definition definition, final Variables variables) {
|
||||||
getter = before.struct.methods.get("get");
|
getter = before.struct.methods.get(new Definition.MethodKey("get", 1));
|
||||||
setter = before.struct.methods.get("put");
|
setter = before.struct.methods.get(new Definition.MethodKey("put", 2));
|
||||||
|
|
||||||
if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1)) {
|
if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1)) {
|
||||||
throw new IllegalArgumentException(error("Illegal map get shortcut for type [" + before.name + "]."));
|
throw new IllegalArgumentException(error("Illegal map get shortcut for type [" + before.name + "]."));
|
||||||
|
|
|
@ -63,7 +63,7 @@ public final class LNewObj extends ALink {
|
||||||
}
|
}
|
||||||
|
|
||||||
final Struct struct = type.struct;
|
final Struct struct = type.struct;
|
||||||
constructor = struct.constructors.get("new");
|
constructor = struct.constructors.get(new Definition.MethodKey("new", arguments.size()));
|
||||||
|
|
||||||
if (constructor != null) {
|
if (constructor != null) {
|
||||||
final Type[] types = new Type[constructor.arguments.size()];
|
final Type[] types = new Type[constructor.arguments.size()];
|
||||||
|
|
|
@ -47,8 +47,8 @@ final class LShortcut extends ALink {
|
||||||
ALink analyze(final CompilerSettings settings, final Definition definition, final Variables variables) {
|
ALink analyze(final CompilerSettings settings, final Definition definition, final Variables variables) {
|
||||||
final Struct struct = before.struct;
|
final Struct struct = before.struct;
|
||||||
|
|
||||||
getter = struct.methods.get("get" + Character.toUpperCase(value.charAt(0)) + value.substring(1));
|
getter = struct.methods.get(new Definition.MethodKey("get" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0));
|
||||||
setter = struct.methods.get("set" + Character.toUpperCase(value.charAt(0)) + value.substring(1));
|
setter = struct.methods.get(new Definition.MethodKey("set" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 1));
|
||||||
|
|
||||||
if (getter != null && (getter.rtn.sort == Sort.VOID || !getter.arguments.isEmpty())) {
|
if (getter != null && (getter.rtn.sort == Sort.VOID || !getter.arguments.isEmpty())) {
|
||||||
throw new IllegalArgumentException(error(
|
throw new IllegalArgumentException(error(
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@@ -19,161 +19,60 @@

 package org.elasticsearch.painless;

+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;

 public class NoSemiColonTests extends ScriptTestCase {

-    public void testIfStatement() {
-        assertEquals(1, exec("int x = 5 if (x == 5) return 1 return 0"));
-        assertEquals(0, exec("int x = 4 if (x == 5) return 1 else return 0"));
-        assertEquals(2, exec("int x = 4 if (x == 5) return 1 else if (x == 4) return 2 else return 0"));
-        assertEquals(1, exec("int x = 4 if (x == 5) return 1 else if (x == 4) return 1 else return 0"));
-
-        assertEquals(3, exec(
-            "int x = 5\n" +
-            "if (x == 5) {\n" +
-            " int y = 2\n" +
-            " \n" +
-            " if (y == 2) {\n" +
-            " x = 3\n" +
-            " }\n" +
-            " \n" +
-            "}\n" +
-            "\n" +
-            "return x\n"));
-    }
-
-    public void testWhileStatement() {
-
-        assertEquals("aaaaaa", exec("String c = \"a\" int x while (x < 5) { ++x c += \"a\" } return c"));
-
-        Object value = exec(
-            " byte[][] b = new byte[5][5] \n" +
-            " byte x = 0, y \n" +
-            " \n" +
-            " while (x < 5) { \n" +
-            " y = 0 \n" +
-            " \n" +
-            " while (y < 5) { \n" +
-            " b[x][y] = (byte)(x*y) \n" +
-            " ++y \n" +
-            " } \n" +
-            " \n" +
-            " ++x \n" +
-            " } \n" +
-            " \n" +
-            " return b \n");
-
-        byte[][] b = (byte[][])value;
-
-        for (byte x = 0; x < 5; ++x) {
-            for (byte y = 0; y < 5; ++y) {
-                assertEquals(x*y, b[x][y]);
-            }
-        }
-    }
-
-    public void testDoWhileStatement() {
-        assertEquals("aaaaaa", exec("String c = \"a\" int x do { c += \"a\"; ++x } while (x < 5) return c"));
-
-        Object value = exec(
-            " long[][] l = new long[5][5] \n" +
-            " long x = 0, y \n" +
-            " \n" +
-            " do { \n" +
-            " y = 0 \n" +
-            " \n" +
-            " do { \n" +
-            " l[(int)x][(int)y] = x*y; \n" +
-            " ++y \n" +
-            " } while (y < 5) \n" +
-            " \n" +
-            " ++x \n" +
-            " } while (x < 5) \n" +
-            " \n" +
-            " return l \n");
-
-        long[][] l = (long[][])value;
-
-        for (long x = 0; x < 5; ++x) {
-            for (long y = 0; y < 5; ++y) {
-                assertEquals(x*y, l[(int)x][(int)y]);
-            }
-        }
-    }
-
-    public void testForStatement() {
-        assertEquals("aaaaaa", exec("String c = \"a\" for (int x = 0; x < 5; ++x) c += \"a\" return c"));
-
-        Object value = exec(
-            " int[][] i = new int[5][5] \n" +
-            " for (int x = 0; x < 5; ++x) { \n" +
-            " for (int y = 0; y < 5; ++y) { \n" +
-            " i[x][y] = x*y \n" +
-            " } \n" +
-            " } \n" +
-            " \n" +
-            " return i \n");
-
-        int[][] i = (int[][])value;
-
-        for (int x = 0; x < 5; ++x) {
-            for (int y = 0; y < 5; ++y) {
-                assertEquals(x*y, i[x][y]);
-            }
-        }
-    }
-
     public void testDeclarationStatement() {
-        assertEquals((byte)2, exec("byte a = 2 return a"));
+        assertEquals((byte)2, exec("byte a = 2; return a"));
-        assertEquals((short)2, exec("short a = 2 return a"));
+        assertEquals((short)2, exec("short a = 2; return a"));
-        assertEquals((char)2, exec("char a = 2 return a"));
+        assertEquals((char)2, exec("char a = 2; return a"));
-        assertEquals(2, exec("int a = 2 return a"));
+        assertEquals(2, exec("int a = 2; return a"));
-        assertEquals(2L, exec("long a = 2 return a"));
+        assertEquals(2L, exec("long a = 2; return a"));
-        assertEquals(2F, exec("float a = 2 return a"));
+        assertEquals(2F, exec("float a = 2; return a"));
-        assertEquals(2.0, exec("double a = 2 return a"));
+        assertEquals(2.0, exec("double a = 2; return a"));
-        assertEquals(false, exec("boolean a = false return a"));
+        assertEquals(false, exec("boolean a = false; return a"));
-        assertEquals("string", exec("String a = \"string\" return a"));
+        assertEquals("string", exec("String a = \"string\"; return a"));
-        assertEquals(HashMap.class, exec("Map<String, Object> a = new HashMap<String, Object>() return a").getClass());
+        assertEquals(HashMap.class, exec("Map<String, Object> a = new HashMap<String, Object>(); return a").getClass());

-        assertEquals(byte[].class, exec("byte[] a = new byte[1] return a").getClass());
+        assertEquals(byte[].class, exec("byte[] a = new byte[1]; return a").getClass());
-        assertEquals(short[].class, exec("short[] a = new short[1] return a").getClass());
+        assertEquals(short[].class, exec("short[] a = new short[1]; return a").getClass());
-        assertEquals(char[].class, exec("char[] a = new char[1] return a").getClass());
+        assertEquals(char[].class, exec("char[] a = new char[1]; return a").getClass());
-        assertEquals(int[].class, exec("int[] a = new int[1] return a").getClass());
+        assertEquals(int[].class, exec("int[] a = new int[1]; return a").getClass());
-        assertEquals(long[].class, exec("long[] a = new long[1] return a").getClass());
+        assertEquals(long[].class, exec("long[] a = new long[1]; return a").getClass());
-        assertEquals(float[].class, exec("float[] a = new float[1] return a").getClass());
+        assertEquals(float[].class, exec("float[] a = new float[1]; return a").getClass());
-        assertEquals(double[].class, exec("double[] a = new double[1] return a").getClass());
+        assertEquals(double[].class, exec("double[] a = new double[1]; return a").getClass());
-        assertEquals(boolean[].class, exec("boolean[] a = new boolean[1] return a").getClass());
+        assertEquals(boolean[].class, exec("boolean[] a = new boolean[1]; return a").getClass());
-        assertEquals(String[].class, exec("String[] a = new String[1] return a").getClass());
+        assertEquals(String[].class, exec("String[] a = new String[1]; return a").getClass());
-        assertEquals(Map[].class, exec("Map<String,Object>[] a = new Map<String,Object>[1] return a").getClass());
+        assertEquals(Map[].class, exec("Map<String,Object>[] a = new Map<String,Object>[1]; return a").getClass());

-        assertEquals(byte[][].class, exec("byte[][] a = new byte[1][2] return a").getClass());
+        assertEquals(byte[][].class, exec("byte[][] a = new byte[1][2]; return a").getClass());
-        assertEquals(short[][][].class, exec("short[][][] a = new short[1][2][3] return a").getClass());
+        assertEquals(short[][][].class, exec("short[][][] a = new short[1][2][3]; return a").getClass());
-        assertEquals(char[][][][].class, exec("char[][][][] a = new char[1][2][3][4] return a").getClass());
+        assertEquals(char[][][][].class, exec("char[][][][] a = new char[1][2][3][4]; return a").getClass());
-        assertEquals(int[][][][][].class, exec("int[][][][][] a = new int[1][2][3][4][5] return a").getClass());
+        assertEquals(int[][][][][].class, exec("int[][][][][] a = new int[1][2][3][4][5]; return a").getClass());
-        assertEquals(long[][].class, exec("long[][] a = new long[1][2] return a").getClass());
+        assertEquals(long[][].class, exec("long[][] a = new long[1][2]; return a").getClass());
-        assertEquals(float[][][].class, exec("float[][][] a = new float[1][2][3] return a").getClass());
+        assertEquals(float[][][].class, exec("float[][][] a = new float[1][2][3]; return a").getClass());
-        assertEquals(double[][][][].class, exec("double[][][][] a = new double[1][2][3][4] return a").getClass());
+        assertEquals(double[][][][].class, exec("double[][][][] a = new double[1][2][3][4]; return a").getClass());
-        assertEquals(boolean[][][][][].class, exec("boolean[][][][][] a = new boolean[1][2][3][4][5] return a").getClass());
+        assertEquals(boolean[][][][][].class, exec("boolean[][][][][] a = new boolean[1][2][3][4][5]; return a").getClass());
-        assertEquals(String[][].class, exec("String[][] a = new String[1][2] return a").getClass());
+        assertEquals(String[][].class, exec("String[][] a = new String[1][2]; return a").getClass());
-        assertEquals(Map[][][].class, exec("Map<String,Object>[][][] a = new Map<String,Object>[1][2][3] return a").getClass());
+        assertEquals(Map[][][].class, exec("Map<String,Object>[][][] a = new Map<String,Object>[1][2][3]; return a").getClass());
     }

-    public void testContinueStatement() {
-        assertEquals(9, exec("int x = 0, y = 0 while (x < 10) { ++x if (x == 1) continue ++y } return y"));
-    }
-
-    public void testBreakStatement() {
-        assertEquals(4, exec("int x = 0, y = 0 while (x < 10) { ++x if (x == 5) break ++y } return y"));
-    }
+    public void testExpression() {
+        assertEquals(10, exec("10"));
+        assertEquals(10, exec("5 + 5"));
+        assertEquals(10, exec("5 + 5"));
+        assertEquals(10, exec("params.param == 'yes' ? 10 : 5", Collections.singletonMap("param", "yes")));
+    }

     @SuppressWarnings("rawtypes")
     public void testReturnStatement() {
         assertEquals(10, exec("return 10"));
-        assertEquals(5, exec("int x = 5 return x"));
+        assertEquals(5, exec("int x = 5; return x"));
-        assertEquals(4, exec("int[] x = new int[2] x[1] = 4 return x[1]"));
+        assertEquals(4, exec("int[] x = new int[2]; x[1] = 4; return x[1]"));
-        assertEquals(5, ((short[])exec("short[] s = new short[3] s[1] = 5 return s"))[1]);
+        assertEquals(5, ((short[])exec("short[] s = new short[3]; s[1] = 5; return s"))[1]);
-        assertEquals(10, ((Map)exec("Map<String,Object> s = new HashMap< String,Object>() s.put(\"x\", 10) return s")).get("x"));
+        assertEquals(10, ((Map)exec("Map<String,Object> s = new HashMap< String,Object>(); s.put(\"x\", 10); return s")).get("x"));
     }
 }
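The hunk above tracks the Painless grammar change that makes semicolons mandatory between statements: the control-flow tests that relied on semicolon-free syntax are deleted outright, and every surviving assertion gains `;` separators. A minimal before/after sketch, assuming `exec` behaves like the ScriptTestCase helper used throughout these tests:

    // Hedged illustration of the grammar change; exec compiles and runs a Painless script.
    Object ok = exec("int a = 2; return a");  // accepted: statements are ';'-separated
    // exec("int a = 2 return a");            // previously legal, now a compile error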
@@ -0,0 +1,52 @@
+package org.elasticsearch.painless;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/** Tests method overloading */
+public class OverloadTests extends ScriptTestCase {
+
+    public void testMethod() {
+        assertEquals(2, exec("return 'abc123abc'.indexOf('c');"));
+        assertEquals(8, exec("return 'abc123abc'.indexOf('c', 3);"));
+        IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
+            exec("return 'abc123abc'.indexOf('c', 3, 'bogus');");
+        });
+        assertTrue(expected.getMessage().contains("[indexOf] with [3] arguments"));
+    }
+
+    public void testMethodDynamic() {
+        assertEquals(2, exec("def x = 'abc123abc'; return x.indexOf('c');"));
+        assertEquals(8, exec("def x = 'abc123abc'; return x.indexOf('c', 3);"));
+        IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
+            exec("def x = 'abc123abc'; return x.indexOf('c', 3, 'bogus');");
+        });
+        assertTrue(expected.getMessage().contains("dynamic method [indexOf] with signature [(String,int,String)"));
+    }
+
+    public void testConstructor() {
+        assertEquals(true, exec("FeatureTest f = new FeatureTest(); return f.x == 0 && f.y == 0;"));
+        assertEquals(true, exec("FeatureTest f = new FeatureTest(1, 2); return f.x == 1 && f.y == 2;"));
+    }
+
+    public void testStatic() {
+        assertEquals(true, exec("return FeatureTest.overloadedStatic();"));
+        assertEquals(false, exec("return FeatureTest.overloadedStatic(false);"));
+    }
+}
@@ -19,6 +19,8 @@

 package org.elasticsearch.painless;

+import java.util.Locale;
+
 public class StringTests extends ScriptTestCase {

     public void testAppend() {

@@ -63,6 +65,21 @@ public class StringTests extends ScriptTestCase {
         assertEquals("cat" + "cat", exec("String s = 'cat'; return s + s;"));
     }

+    public void testAppendMultiple() {
+        assertEquals("cat" + true + "abc" + null, exec("String s = \"cat\"; return s + true + 'abc' + null;"));
+    }
+
+    public void testAppendMany() {
+        StringBuilder script = new StringBuilder("String s = \"cat\"; return s");
+        StringBuilder result = new StringBuilder("cat");
+        for (int i = 0; i < 200 /* indy limit */ + 10; i++) {
+            final String s = String.format(Locale.ROOT, "%03d", i);
+            script.append(" + '").append(s).append("'.toString()");
+            result.append(s);
+        }
+        assertEquals(result.toString(), exec(script.toString()));
+    }
+
     public void testStringAPI() {
         assertEquals("", exec("return new String();"));
         assertEquals('x', exec("String s = \"x\"; return s.charAt(0);"));

@@ -127,8 +144,8 @@ public class StringTests extends ScriptTestCase {
         assertEquals("c", exec("return (String)(char)\"c\""));
         assertEquals("c", exec("return (String)(char)'c'"));

-        assertEquals('c', exec("String s = \"c\" (char)s"));
+        assertEquals('c', exec("String s = \"c\"; (char)s"));
-        assertEquals('c', exec("String s = 'c' (char)s"));
+        assertEquals('c', exec("String s = 'c'; (char)s"));

         try {
             assertEquals("cc", exec("return (String)(char)\"cc\""));

@@ -145,14 +162,14 @@ public class StringTests extends ScriptTestCase {
         }

         try {
-            assertEquals('c', exec("String s = \"cc\" (char)s"));
+            assertEquals('c', exec("String s = \"cc\"; (char)s"));
             fail();
         } catch (final ClassCastException cce) {
             assertTrue(cce.getMessage().contains("Cannot cast [String] with length greater than one to [char]."));
         }

         try {
-            assertEquals('c', exec("String s = 'cc' (char)s"));
+            assertEquals('c', exec("String s = 'cc'; (char)s"));
             fail();
         } catch (final ClassCastException cce) {
             assertTrue(cce.getMessage().contains("Cannot cast [String] with length greater than one to [char]."));

@@ -163,8 +180,8 @@ public class StringTests extends ScriptTestCase {
         assertEquals("c", exec("return (String)(Character)\"c\""));
         assertEquals("c", exec("return (String)(Character)'c'"));

-        assertEquals('c', exec("String s = \"c\" (Character)s"));
+        assertEquals('c', exec("String s = \"c\"; (Character)s"));
-        assertEquals('c', exec("String s = 'c' (Character)s"));
+        assertEquals('c', exec("String s = 'c'; (Character)s"));

         try {
             assertEquals("cc", exec("return (String)(Character)\"cc\""));

@@ -181,14 +198,14 @@ public class StringTests extends ScriptTestCase {
         }

         try {
-            assertEquals('c', exec("String s = \"cc\" (Character)s"));
+            assertEquals('c', exec("String s = \"cc\"; (Character)s"));
             fail();
         } catch (final ClassCastException cce) {
             assertTrue(cce.getMessage().contains("Cannot cast [String] with length greater than one to [Character]."));
         }

         try {
-            assertEquals('c', exec("String s = 'cc' (Character)s"));
+            assertEquals('c', exec("String s = 'cc'; (Character)s"));
             fail();
         } catch (final ClassCastException cce) {
             assertTrue(cce.getMessage().contains("Cannot cast [String] with length greater than one to [Character]."));
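The new testAppendMany walks string concatenation past the 200-argument ceiling flagged by the `/* indy limit */` comment, so the compiler has to split the invokedynamic concat chain. A small standalone sketch of the same script construction; the loop bound mirrors the test's `200 + 10` and the resulting string would be handed to the `exec(...)` helper:

    import java.util.Locale;

    class AppendManySketch {
        public static void main(String[] args) {
            // Build a Painless script whose concat chain exceeds the indy limit noted above.
            StringBuilder script = new StringBuilder("String s = \"cat\"; return s");
            for (int i = 0; i < 200 + 10; i++) {
                script.append(" + '").append(String.format(Locale.ROOT, "%03d", i)).append("'.toString()");
            }
            System.out.println(script); // the script source a test would compile and run
        }
    }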
@@ -90,7 +90,7 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
             "The maximum number of statements that can be executed in a loop has been reached."));

         expected = expectThrows(PainlessError.class, () -> {
-            exec("while (true) {int y = 5}");
+            exec("while (true) {int y = 5;}");
         });
         assertTrue(expected.getMessage().contains(
             "The maximum number of statements that can be executed in a loop has been reached."));

@@ -116,7 +116,7 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
             "The maximum number of statements that can be executed in a loop has been reached."));

         expected = expectThrows(PainlessError.class, () -> {
-            exec("for (;;) {int x = 5}");
+            exec("for (;;) {int x = 5;}");
             fail("should have hit PainlessError");
         });
         assertTrue(expected.getMessage().contains(

@@ -130,7 +130,7 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
             "The maximum number of statements that can be executed in a loop has been reached."));

         RuntimeException parseException = expectThrows(RuntimeException.class, () -> {
-            exec("try { int x } catch (PainlessError error) {}");
+            exec("try { int x; } catch (PainlessError error) {}");
             fail("should have hit ParseException");
         });
         assertTrue(parseException.getMessage().contains("Not a type [PainlessError]."));
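These assertions exercise the Painless loop counter, which aborts scripts whose loops execute more statements than allowed. A conceptual sketch of the guard; the constant and the plumbing here are illustrative stand-ins, since the real counter is emitted into the compiled bytecode by the Painless compiler:

    // Hypothetical rendering of the loop guard that produces the PainlessError above.
    static void guardedLoop() {
        final int MAX_LOOP_STATEMENTS = 10_000; // assumed illustrative limit, not the real setting
        int loopCounter = 0;
        while (true) {
            if (++loopCounter > MAX_LOOP_STATEMENTS) {
                throw new IllegalStateException(
                    "The maximum number of statements that can be executed in a loop has been reached.");
            }
            int y = 5; // the loop body from the test script
        }
    }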
@@ -19,6 +19,7 @@

 package org.elasticsearch.index.reindex;

+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;

@@ -57,6 +58,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;

 import static java.lang.Math.max;
 import static java.lang.Math.min;

@@ -91,7 +93,8 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
     private final ThreadPool threadPool;
     private final SearchRequest firstSearchRequest;
     private final ActionListener<Response> listener;
-    private final Retry retry;
+    private final BackoffPolicy backoffPolicy;
+    private final Retry bulkRetry;

     public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client,
             ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest, ActionListener<Response> listener) {

@@ -102,7 +105,8 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
         this.mainRequest = mainRequest;
         this.firstSearchRequest = firstSearchRequest;
         this.listener = listener;
-        retry = Retry.on(EsRejectedExecutionException.class).policy(wrapBackoffPolicy(backoffPolicy()));
+        backoffPolicy = buildBackoffPolicy();
+        bulkRetry = Retry.on(EsRejectedExecutionException.class).policy(wrapBackoffPolicy(backoffPolicy));
     }

     protected abstract BulkRequest buildBulk(Iterable<SearchHit> docs);
@@ -131,21 +135,14 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
                         firstSearchRequest.types() == null || firstSearchRequest.types().length == 0 ? ""
                                 : firstSearchRequest.types());
             }
-            client.search(firstSearchRequest, new ActionListener<SearchResponse>() {
-                @Override
-                public void onResponse(SearchResponse response) {
-                    logger.debug("[{}] documents match query", response.getHits().getTotalHits());
-                    onScrollResponse(timeValueSeconds(0), response);
-                }
-
-                @Override
-                public void onFailure(Throwable e) {
-                    finishHim(e);
-                }
-            });
         } catch (Throwable t) {
             finishHim(t);
+            return;
         }
+        searchWithRetry(listener -> client.search(firstSearchRequest, listener), (SearchResponse response) -> {
+            logger.debug("[{}] documents match query", response.getHits().getTotalHits());
+            onScrollResponse(timeValueSeconds(0), response);
+        });
     }

     /**

@@ -239,7 +236,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
             finishHim(null);
             return;
         }
-        retry.withAsyncBackoff(client, request, new ActionListener<BulkResponse>() {
+        bulkRetry.withAsyncBackoff(client, request, new ActionListener<BulkResponse>() {
             @Override
             public void onResponse(BulkResponse response) {
                 onBulkResponse(response);

@@ -322,16 +319,8 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
         SearchScrollRequest request = new SearchScrollRequest();
         // Add the wait time into the scroll timeout so it won't timeout while we wait for throttling
         request.scrollId(scroll.get()).scroll(timeValueNanos(firstSearchRequest.scroll().keepAlive().nanos() + waitTime));
-        client.searchScroll(request, new ActionListener<SearchResponse>() {
-            @Override
-            public void onResponse(SearchResponse response) {
-                onScrollResponse(timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime())), response);
-            }
-
-            @Override
-            public void onFailure(Throwable e) {
-                finishHim(e);
-            }
+        searchWithRetry(listener -> client.searchScroll(request, listener), (SearchResponse response) -> {
+            onScrollResponse(timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime())), response);
         });
     }

@@ -434,9 +423,9 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
     }

     /**
-     * Build the backoff policy for use with retries.
+     * Get the backoff policy for use with retries.
     */
-    BackoffPolicy backoffPolicy() {
+    BackoffPolicy buildBackoffPolicy() {
         return exponentialBackoff(mainRequest.getRetryBackoffInitialTime(), mainRequest.getMaxRetries());
     }
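Since the retry delays now come from one BackoffPolicy built once per request, the worst-case wait is simply the sum of the delays the policy yields. A sketch in the spirit of the testDefaultRetryTimes loop further down; the 500ms initial delay and the retry count of 11 are illustrative stand-ins for mainRequest.getRetryBackoffInitialTime() and mainRequest.getMaxRetries():

    import java.util.Iterator;

    import org.elasticsearch.action.bulk.BackoffPolicy;
    import org.elasticsearch.common.unit.TimeValue;

    class BackoffSumSketch {
        public static void main(String[] args) {
            Iterator<TimeValue> policy =
                    BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(500), 11).iterator();
            long millis = 0;
            while (policy.hasNext()) {
                millis += policy.next().millis(); // add up every delay the policy would impose
            }
            System.out.println("total backoff ~" + millis + "ms");
        }
    }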
@@ -470,7 +459,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
     }

     /**
-     * Wraps a backoffPolicy in another policy that counts the number of backoffs acquired.
+     * Wraps a backoffPolicy in another policy that counts the number of backoffs acquired. Used to count bulk backoffs.
     */
     private BackoffPolicy wrapBackoffPolicy(BackoffPolicy backoffPolicy) {
         return new BackoffPolicy() {

@@ -488,11 +477,52 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
                         if (false == delegate.hasNext()) {
                             return null;
                         }
-                        task.countRetry();
+                        task.countBulkRetry();
                         return delegate.next();
                     }
                 };
             }
         };
     }

+    /**
+     * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
+     * rejected execution.
+     *
+     * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure.
+     * @param onResponse consumes the response from the action
+     */
+    private <T> void searchWithRetry(Consumer<ActionListener<T>> action, Consumer<T> onResponse) {
+        class RetryHelper extends AbstractRunnable implements ActionListener<T> {
+            private final Iterator<TimeValue> retries = backoffPolicy.iterator();
+
+            @Override
+            public void onResponse(T response) {
+                onResponse.accept(response);
+            }
+
+            @Override
+            protected void doRun() throws Exception {
+                action.accept(this);
+            }
+
+            @Override
+            public void onFailure(Throwable e) {
+                if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
+                    if (retries.hasNext()) {
+                        logger.trace("retrying rejected search", e);
+                        threadPool.schedule(retries.next(), ThreadPool.Names.SAME, this);
+                        task.countSearchRetry();
+                    } else {
+                        logger.warn("giving up on search because we retried {} times without success", e, retries);
+                        finishHim(e);
+                    }
+                } else {
+                    logger.warn("giving up on search because it failed with a non-retryable exception", e);
+                    finishHim(e);
+                }
+            }
+        }
+        new RetryHelper().run();
+    }
 }
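The new searchWithRetry helper turns any listener-based search call into one that re-schedules itself while the backoff iterator still has delays left, counting each retry on the task. A standalone sketch of that pattern with names local to the sketch only; the real code schedules on the thread pool and unwraps EsRejectedExecutionException rather than sleeping inline:

    import java.util.Iterator;
    import java.util.function.Consumer;

    // Self-retrying action: the starter is also its own failure handler.
    final class RetrySketch<T> {
        private final Consumer<RetrySketch<T>> action; // starts the async call, reports back here
        private final Consumer<T> onResponse;
        private final Iterator<Long> backoffMillis;    // delays to wait between attempts

        RetrySketch(Consumer<RetrySketch<T>> action, Consumer<T> onResponse, Iterator<Long> backoffMillis) {
            this.action = action;
            this.onResponse = onResponse;
            this.backoffMillis = backoffMillis;
        }

        void run() { action.accept(this); }

        void succeeded(T response) { onResponse.accept(response); }

        void failed(RuntimeException e) throws InterruptedException {
            if (backoffMillis.hasNext()) {
                Thread.sleep(backoffMillis.next()); // stand-in for threadPool.schedule(...)
                run();                              // retry the action
            } else {
                throw e;                            // out of retries; the real code calls finishHim(e)
            }
        }
    }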
@@ -19,9 +19,6 @@

 package org.elasticsearch.index.reindex;

-import java.io.IOException;
-import java.util.Arrays;
-
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.WriteConsistencyLevel;

@@ -34,6 +31,9 @@ import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;

+import java.io.IOException;
+import java.util.Arrays;
+
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
 import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
@@ -107,6 +107,23 @@ public abstract class AbstractBulkByScrollRequestBuilder<
         return self();
     }

+    /**
+     * Initial delay after a rejection before retrying a bulk request. With the default maxRetries the total backoff for retrying rejections
+     * is about one minute per bulk request. Once the entire bulk request is successful the retry counter resets.
+     */
+    public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
+        request.setRetryBackoffInitialTime(retryBackoffInitialTime);
+        return self();
+    }
+
+    /**
+     * Total number of retries attempted for rejections. There is no way to ask for unlimited retries.
+     */
+    public Self setMaxRetries(int maxRetries) {
+        request.setMaxRetries(maxRetries);
+        return self();
+    }
+
     /**
     * Set the throttle for this request in sub-requests per second. {@link Float#POSITIVE_INFINITY} means set no throttle and that is the
     * default. Throttling is done between batches, as we start the next scroll requests. That way we can increase the scroll's timeout to
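A hedged sketch of a caller tuning the new knobs on a concrete builder; only setRetryBackoffInitialTime and setMaxRetries come from this hunk, while the builder variable and the 500ms/11 values are illustrative:

    import org.elasticsearch.common.unit.TimeValue;

    // Assuming `reindex` is an already-constructed ReindexRequestBuilder:
    reindex.setRetryBackoffInitialTime(TimeValue.timeValueMillis(500)) // first wait after a rejection
           .setMaxRetries(11);                                         // then give up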
@@ -26,12 +26,11 @@ import org.elasticsearch.script.Script;

 public abstract class AbstractBulkIndexByScrollRequestBuilder<
                 Request extends AbstractBulkIndexByScrollRequest<Request>,
-                Response extends BulkIndexByScrollResponse,
-                Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Self>>
-        extends AbstractBulkByScrollRequestBuilder<Request, Response, Self> {
+                Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Self>>
+        extends AbstractBulkByScrollRequestBuilder<Request, BulkIndexByScrollResponse, Self> {

     protected AbstractBulkIndexByScrollRequestBuilder(ElasticsearchClient client,
-            Action<Request, Response, Self> action, SearchRequestBuilder search, Request request) {
+            Action<Request, BulkIndexByScrollResponse, Self> action, SearchRequestBuilder search, Request request) {
         super(client, action, search, request);
     }
@@ -58,7 +58,8 @@ public class BulkByScrollTask extends CancellableTask {
     private final AtomicLong noops = new AtomicLong(0);
     private final AtomicInteger batch = new AtomicInteger(0);
     private final AtomicLong versionConflicts = new AtomicLong(0);
-    private final AtomicLong retries = new AtomicLong(0);
+    private final AtomicLong bulkRetries = new AtomicLong(0);
+    private final AtomicLong searchRetries = new AtomicLong(0);
     private final AtomicLong throttledNanos = new AtomicLong();
     /**
     * The number of requests per second to which to throttle the request that this task represents. The other variables are all AtomicXXX

@@ -84,7 +85,8 @@ public class BulkByScrollTask extends CancellableTask {
     @Override
     public Status getStatus() {
         return new Status(total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), noops.get(),
-                retries.get(), timeValueNanos(throttledNanos.get()), getRequestsPerSecond(), getReasonCancelled(), throttledUntil());
+                bulkRetries.get(), searchRetries.get(), timeValueNanos(throttledNanos.get()), getRequestsPerSecond(), getReasonCancelled(),
+                throttledUntil());
     }

     private TimeValue throttledUntil() {

@@ -133,14 +135,16 @@ public class BulkByScrollTask extends CancellableTask {
         private final int batches;
         private final long versionConflicts;
         private final long noops;
-        private final long retries;
+        private final long bulkRetries;
+        private final long searchRetries;
         private final TimeValue throttled;
         private final float requestsPerSecond;
         private final String reasonCancelled;
         private final TimeValue throttledUntil;

-        public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, long retries,
-                TimeValue throttled, float requestsPerSecond, @Nullable String reasonCancelled, TimeValue throttledUntil) {
+        public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops,
+                long bulkRetries, long searchRetries, TimeValue throttled, float requestsPerSecond, @Nullable String reasonCancelled,
+                TimeValue throttledUntil) {
             this.total = checkPositive(total, "total");
             this.updated = checkPositive(updated, "updated");
             this.created = checkPositive(created, "created");

@@ -148,7 +152,8 @@ public class BulkByScrollTask extends CancellableTask {
             this.batches = checkPositive(batches, "batches");
             this.versionConflicts = checkPositive(versionConflicts, "versionConflicts");
             this.noops = checkPositive(noops, "noops");
-            this.retries = checkPositive(retries, "retries");
+            this.bulkRetries = checkPositive(bulkRetries, "bulkRetries");
+            this.searchRetries = checkPositive(searchRetries, "searchRetries");
             this.throttled = throttled;
             this.requestsPerSecond = requestsPerSecond;
             this.reasonCancelled = reasonCancelled;

@@ -163,7 +168,8 @@ public class BulkByScrollTask extends CancellableTask {
             batches = in.readVInt();
             versionConflicts = in.readVLong();
             noops = in.readVLong();
-            retries = in.readVLong();
+            bulkRetries = in.readVLong();
+            searchRetries = in.readVLong();
             throttled = TimeValue.readTimeValue(in);
             requestsPerSecond = in.readFloat();
             reasonCancelled = in.readOptionalString();

@@ -179,7 +185,8 @@ public class BulkByScrollTask extends CancellableTask {
             out.writeVInt(batches);
             out.writeVLong(versionConflicts);
             out.writeVLong(noops);
-            out.writeVLong(retries);
+            out.writeVLong(bulkRetries);
+            out.writeVLong(searchRetries);
             throttled.writeTo(out);
             out.writeFloat(requestsPerSecond);
             out.writeOptionalString(reasonCancelled);

@@ -208,7 +215,11 @@ public class BulkByScrollTask extends CancellableTask {
             builder.field("batches", batches);
             builder.field("version_conflicts", versionConflicts);
             builder.field("noops", noops);
-            builder.field("retries", retries);
+            builder.startObject("retries"); {
+                builder.field("bulk", bulkRetries);
+                builder.field("search", searchRetries);
+            }
+            builder.endObject();
             builder.timeValueField("throttled_millis", "throttled", throttled);
             builder.field("requests_per_second", requestsPerSecond == Float.POSITIVE_INFINITY ? "unlimited" : requestsPerSecond);
             if (reasonCancelled != null) {
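The XContent change nests the counter under a retries object. A sketch reproducing just that fragment so the wire shape is visible; the counter values are illustrative:

    import java.io.IOException;

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    class RetriesXContentSketch {
        public static void main(String[] args) throws IOException {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();
            builder.startObject("retries"); {
                builder.field("bulk", 3L);    // bulkRetries, illustrative value
                builder.field("search", 1L);  // searchRetries, illustrative value
            }
            builder.endObject();
            builder.endObject();
            System.out.println(builder.string()); // {"retries":{"bulk":3,"search":1}}
        }
    }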
@@ -233,7 +244,7 @@ public class BulkByScrollTask extends CancellableTask {
             builder.append(",batches=").append(batches);
             builder.append(",versionConflicts=").append(versionConflicts);
             builder.append(",noops=").append(noops);
-            builder.append(",retries=").append(retries);
+            builder.append(",retries=").append(bulkRetries);
             if (reasonCancelled != null) {
                 builder.append(",canceled=").append(reasonCancelled);
             }

@@ -296,10 +307,17 @@ public class BulkByScrollTask extends CancellableTask {
         }

         /**
-         * Number of retries that had to be attempted due to rejected executions.
+         * Number of retries that had to be attempted due to bulk actions being rejected.
         */
-        public long getRetries() {
-            return retries;
+        public long getBulkRetries() {
+            return bulkRetries;
+        }
+
+        /**
+         * Number of retries that had to be attempted due to search actions being rejected.
+         */
+        public long getSearchRetries() {
+            return searchRetries;
         }

         /**

@@ -373,8 +391,12 @@ public class BulkByScrollTask extends CancellableTask {
         versionConflicts.incrementAndGet();
     }

-    void countRetry() {
-        retries.incrementAndGet();
+    void countBulkRetry() {
+        bulkRetries.incrementAndGet();
+    }
+
+    void countSearchRetry() {
+        searchRetries.incrementAndGet();
     }

     float getRequestsPerSecond() {
@@ -98,6 +98,20 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont
         return status.getReasonCancelled();
     }

+    /**
+     * The number of times that the request had to retry bulk actions.
+     */
+    public long getBulkRetries() {
+        return status.getBulkRetries();
+    }
+
+    /**
+     * The number of times that the request had to retry search actions.
+     */
+    public long getSearchRetries() {
+        return status.getSearchRetries();
+    }
+
     /**
     * All of the indexing failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the
     * default).
@@ -19,10 +19,6 @@

 package org.elasticsearch.index.reindex;

-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.CompositeIndicesRequest;
 import org.elasticsearch.action.IndicesRequest;

@@ -32,8 +28,11 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.uid.Versions;

-import static java.util.Collections.unmodifiableList;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static java.util.Collections.unmodifiableList;
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 import static org.elasticsearch.index.VersionType.INTERNAL;
@@ -27,7 +27,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;

 public class ReindexRequestBuilder extends
-        AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, BulkIndexByScrollResponse, ReindexRequestBuilder> {
+        AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexRequestBuilder> {
     private final IndexRequestBuilder destination;

     public ReindexRequestBuilder(ElasticsearchClient client,
@@ -19,14 +19,14 @@

 package org.elasticsearch.index.reindex;

-import java.util.ArrayList;
-import java.util.List;
-
 import org.elasticsearch.action.CompositeIndicesRequest;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;

+import java.util.ArrayList;
+import java.util.List;
+
 import static java.util.Collections.unmodifiableList;

 /**
@@ -25,7 +25,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;

 public class UpdateByQueryRequestBuilder extends
-        AbstractBulkIndexByScrollRequestBuilder<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> {
+        AbstractBulkIndexByScrollRequestBuilder<UpdateByQueryRequest, UpdateByQueryRequestBuilder> {

     public UpdateByQueryRequestBuilder(ElasticsearchClient client,
             Action<UpdateByQueryRequest, BulkIndexByScrollResponse, UpdateByQueryRequestBuilder> action) {
@@ -19,6 +19,8 @@

 package org.elasticsearch.index.reindex;

+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionListener;

@@ -38,6 +40,8 @@ import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.ClearScrollRequest;
 import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.ReduceSearchPhaseException;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchScrollRequest;

@@ -98,6 +102,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
 import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.either;
 import static org.hamcrest.Matchers.emptyCollectionOf;
 import static org.hamcrest.Matchers.equalTo;

@@ -122,8 +127,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
     public void setupForTest() {
         client = new MyMockClient(new NoOpClient(getTestName()));
         threadPool = new ThreadPool(getTestName());
-        testRequest = new DummyAbstractBulkByScrollRequest();
-        firstSearchRequest = new SearchRequest().scroll(timeValueSeconds(10));
+        firstSearchRequest = new SearchRequest();
+        testRequest = new DummyAbstractBulkByScrollRequest(firstSearchRequest);
         listener = new PlainActionFuture<>();
         scrollId = null;
         taskManager = new TaskManager(Settings.EMPTY);
@@ -150,10 +155,62 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
     * random scroll id so it is checked instead.
     */
    private String scrollId() {
-        scrollId = randomSimpleString(random(), 1, 1000); // Empty string's get special behavior we don't want
+        scrollId = randomSimpleString(random(), 1, 1000); // Empty strings get special behavior we don't want
        return scrollId;
    }

+    public void testStartRetriesOnRejectionAndSucceeds() throws Exception {
+        client.searchesToReject = randomIntBetween(0, testRequest.getMaxRetries() - 1);
+        DummyAbstractAsyncBulkByScrollAction action = new DummyActionWithoutBackoff();
+        action.start();
+        assertBusy(() -> assertEquals(client.searchesToReject + 1, client.searchAttempts.get()));
+        if (listener.isDone()) {
+            Object result = listener.get();
+            fail("Expected listener not to be done but it was and had " + result);
+        }
+        assertBusy(() -> assertNotNull("There should be a search attempt pending that we didn't reject", client.lastSearch.get()));
+        assertEquals(client.searchesToReject, testTask.getStatus().getSearchRetries());
+    }
+
+    public void testStartRetriesOnRejectionButFailsOnTooManyRejections() throws Exception {
+        client.searchesToReject = testRequest.getMaxRetries() + randomIntBetween(1, 100);
+        DummyAbstractAsyncBulkByScrollAction action = new DummyActionWithoutBackoff();
+        action.start();
+        assertBusy(() -> assertEquals(testRequest.getMaxRetries() + 1, client.searchAttempts.get()));
+        assertBusy(() -> assertTrue(listener.isDone()));
+        ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
+        assertThat(ExceptionsHelper.stackTrace(e), containsString(EsRejectedExecutionException.class.getSimpleName()));
+        assertNull("There shouldn't be a search attempt pending that we didn't reject", client.lastSearch.get());
+        assertEquals(testRequest.getMaxRetries(), testTask.getStatus().getSearchRetries());
+    }
+
+    public void testStartNextScrollRetriesOnRejectionAndSucceeds() throws Exception {
+        client.scrollsToReject = randomIntBetween(0, testRequest.getMaxRetries() - 1);
+        DummyAbstractAsyncBulkByScrollAction action = new DummyActionWithoutBackoff();
+        action.setScroll(scrollId());
+        action.startNextScroll(0);
+        assertBusy(() -> assertEquals(client.scrollsToReject + 1, client.scrollAttempts.get()));
+        if (listener.isDone()) {
+            Object result = listener.get();
+            fail("Expected listener not to be done but it was and had " + result);
+        }
+        assertBusy(() -> assertNotNull("There should be a scroll attempt pending that we didn't reject", client.lastScroll.get()));
+        assertEquals(client.scrollsToReject, testTask.getStatus().getSearchRetries());
+    }
+
+    public void testStartNextScrollRetriesOnRejectionButFailsOnTooManyRejections() throws Exception {
+        client.scrollsToReject = testRequest.getMaxRetries() + randomIntBetween(1, 100);
+        DummyAbstractAsyncBulkByScrollAction action = new DummyActionWithoutBackoff();
+        action.setScroll(scrollId());
+        action.startNextScroll(0);
+        assertBusy(() -> assertEquals(testRequest.getMaxRetries() + 1, client.scrollAttempts.get()));
+        assertBusy(() -> assertTrue(listener.isDone()));
+        ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
+        assertThat(ExceptionsHelper.stackTrace(e), containsString(EsRejectedExecutionException.class.getSimpleName()));
+        assertNull("There shouldn't be a scroll attempt pending that we didn't reject", client.lastScroll.get());
+        assertEquals(testRequest.getMaxRetries(), testTask.getStatus().getSearchRetries());
+    }
+
     public void testScrollResponseSetsTotal() {
         // Default is 0, meaning unstarted
         assertEquals(0, testTask.getStatus().getTotal());
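The four retry tests above lean on MyMockClient fields (searchesToReject, scrollsToReject, searchAttempts, scrollAttempts, lastSearch, lastScroll) whose definitions sit outside this hunk. A hedged sketch of the reject-the-first-N behavior they imply; the real MyMockClient wires this into the client plumbing rather than a bare callback:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Consumer;

    import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

    class RejectingStub {
        int searchesToReject = 0;                                // reject this many attempts first
        final AtomicInteger searchAttempts = new AtomicInteger();

        void search(Runnable onSuccess, Consumer<Throwable> onFailure) {
            if (searchAttempts.incrementAndGet() <= searchesToReject) {
                onFailure.accept(new EsRejectedExecutionException("simulated rejection"));
            } else {
                onSuccess.run();
            }
        }
    }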
@ -354,8 +411,9 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
|
||||||
int bulksToTry = randomIntBetween(1, 10);
|
int bulksToTry = randomIntBetween(1, 10);
|
||||||
long retryAttempts = 0;
|
long retryAttempts = 0;
|
||||||
for (int i = 0; i < bulksToTry; i++) {
|
for (int i = 0; i < bulksToTry; i++) {
|
||||||
retryAttempts += retryTestCase(false);
|
bulkRetryTestCase(false);
|
||||||
assertEquals(retryAttempts, testTask.getStatus().getRetries());
|
retryAttempts += testRequest.getMaxRetries();
|
||||||
|
assertEquals(retryAttempts, testTask.getStatus().getBulkRetries());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -363,8 +421,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
|
||||||
* Mimicks bulk rejections. These should be retried but we fail anyway because we run out of retries.
|
* Mimicks bulk rejections. These should be retried but we fail anyway because we run out of retries.
|
||||||
*/
|
*/
|
||||||
public void testBulkRejectionsRetryAndFailAnyway() throws Exception {
|
public void testBulkRejectionsRetryAndFailAnyway() throws Exception {
|
||||||
long retryAttempts = retryTestCase(true);
|
bulkRetryTestCase(true);
|
||||||
assertEquals(retryAttempts, testTask.getStatus().getRetries());
|
assertEquals(testRequest.getMaxRetries(), testTask.getStatus().getBulkRetries());
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testPerfectlyThrottledBatchTime() {
|
public void testPerfectlyThrottledBatchTime() {
|
||||||
|
@ -398,6 +456,9 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
|
||||||
DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
|
DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
|
||||||
action.setScroll(scrollId());
|
action.setScroll(scrollId());
|
||||||
|
|
||||||
|
// Set the base for the scroll to wait - this is added to the figure we calculate below
|
||||||
|
firstSearchRequest.scroll(timeValueSeconds(10));
|
||||||
|
|
||||||
// We'd like to get about 1 request a second
|
// We'd like to get about 1 request a second
|
||||||
testTask.rethrottle(1f);
|
testTask.rethrottle(1f);
|
||||||
// Make the last scroll look nearly instant
|
// Make the last scroll look nearly instant
|
||||||
|
@ -405,7 +466,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
|
||||||
// The last batch had 100 documents
|
// The last batch had 100 documents
|
||||||
action.startNextScroll(100);
|
action.startNextScroll(100);
|
||||||
|
|
||||||
// So the next request is going to have to wait an extra 100 seconds or so (base was 10, so 110ish)
|
// So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish)
|
||||||
assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L)));
|
assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L)));
|
||||||
|
|
||||||
// Now we can simulate a response and check the delay that we used for the task
|
// Now we can simulate a response and check the delay that we used for the task
|
||||||
|
@ -422,10 +483,14 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
|
||||||
         assertEquals(capturedDelay.get(), testTask.getStatus().getThrottled());
     }
 
-    private long retryTestCase(boolean failWithRejection) throws Exception {
+    /**
+     * Execute a bulk retry test case. The total number of failures is random and the number of retries attempted is set to
+     * testRequest.getMaxRetries and controlled by the failWithRejection parameter.
+     */
+    private void bulkRetryTestCase(boolean failWithRejection) throws Exception {
         int totalFailures = randomIntBetween(1, testRequest.getMaxRetries());
         int size = randomIntBetween(1, 100);
-        int retryAttempts = totalFailures - (failWithRejection ? 1 : 0);
+        testRequest.setMaxRetries(totalFailures - (failWithRejection ? 1 : 0));
 
         client.bulksToReject = client.bulksAttempts.get() + totalFailures;
         /*
@@ -433,13 +498,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
          * deal with it. We just wait for it to happen.
          */
         CountDownLatch successLatch = new CountDownLatch(1);
-        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction() {
-            @Override
-            BackoffPolicy backoffPolicy() {
-                // Force a backoff time of 0 to prevent sleeping
-                return constantBackoff(timeValueMillis(0), retryAttempts);
-            }
-
+        DummyAbstractAsyncBulkByScrollAction action = new DummyActionWithoutBackoff() {
             @Override
             void startNextScroll(int lastBatchSize) {
                 successLatch.countDown();
@@ -459,14 +518,13 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
         } else {
             successLatch.await(10, TimeUnit.SECONDS);
         }
-        return retryAttempts;
     }
 
     /**
      * The default retry time matches what we say it is in the javadoc for the request.
      */
     public void testDefaultRetryTimes() {
-        Iterator<TimeValue> policy = new DummyAbstractAsyncBulkByScrollAction().backoffPolicy().iterator();
+        Iterator<TimeValue> policy = new DummyAbstractAsyncBulkByScrollAction().buildBackoffPolicy().iterator();
         long millis = 0;
         while (policy.hasNext()) {
             millis += policy.next().millis();
@@ -625,7 +683,22 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
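
testDefaultRetryTimes walks the request's default backoff policy and totals every delay it will ever hand out. A minimal sketch of that summation pattern, with invented delay values standing in for the real defaults:

    import java.util.Arrays;
    import java.util.Iterator;

    // Summation pattern from testDefaultRetryTimes; the delays here are
    // made-up stand-ins, not the request's real default policy.
    public class SumDelaysSketch {
        public static void main(String[] args) {
            Iterator<Long> policy = Arrays.asList(50L, 100L, 200L, 400L).iterator();
            long millis = 0;
            while (policy.hasNext()) {
                millis += policy.next();
            }
            System.out.println("total backoff millis = " + millis); // 750
        }
    }
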
         }
     }
 
+    /**
+     * An extension to {@linkplain DummyAbstractAsyncBulkByScrollAction} that uses a 0 delaying backoff policy.
+     */
+    private class DummyActionWithoutBackoff extends DummyAbstractAsyncBulkByScrollAction {
+        @Override
+        BackoffPolicy buildBackoffPolicy() {
+            // Force a backoff time of 0 to prevent sleeping
+            return constantBackoff(timeValueMillis(0), testRequest.getMaxRetries());
+        }
+    }
+
     private static class DummyAbstractBulkByScrollRequest extends AbstractBulkByScrollRequest<DummyAbstractBulkByScrollRequest> {
+        public DummyAbstractBulkByScrollRequest(SearchRequest searchRequest) {
+            super(searchRequest);
+        }
+
         @Override
         protected DummyAbstractBulkByScrollRequest self() {
             return this;
@@ -635,11 +708,23 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
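
DummyActionWithoutBackoff leans on constantBackoff(timeValueMillis(0), maxRetries), which yields maxRetries delays that are all the same length. A plain-Java sketch of such an iterator, assuming that contract (this is not the Elasticsearch implementation):

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    // Stand-in for a constant backoff policy: hands out exactly maxRetries
    // delays, each delayMillis long. Assumes that contract; not ES code.
    public class ConstantBackoffSketch implements Iterator<Long> {
        private final long delayMillis;
        private final int maxRetries;
        private int consumed;

        public ConstantBackoffSketch(long delayMillis, int maxRetries) {
            this.delayMillis = delayMillis;
            this.maxRetries = maxRetries;
        }

        @Override
        public boolean hasNext() {
            return consumed < maxRetries;
        }

        @Override
        public Long next() {
            if (!hasNext()) {
                throw new NoSuchElementException("out of retries");
            }
            consumed++;
            return delayMillis; // zero means retry immediately, no sleeping
        }
    }
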
     private class MyMockClient extends FilterClient {
         private final List<String> scrollsCleared = new ArrayList<>();
         private final AtomicInteger bulksAttempts = new AtomicInteger();
+        private final AtomicInteger searchAttempts = new AtomicInteger();
+        private final AtomicInteger scrollAttempts = new AtomicInteger();
         private final AtomicReference<Map<String, String>> lastHeaders = new AtomicReference<>();
         private final AtomicReference<RefreshRequest> lastRefreshRequest = new AtomicReference<>();
+        /**
+         * Last search attempt that wasn't rejected outright.
+         */
+        private final AtomicReference<RequestAndListener<SearchRequest, SearchResponse>> lastSearch = new AtomicReference<>();
+        /**
+         * Last scroll attempt that wasn't rejected outright.
+         */
         private final AtomicReference<RequestAndListener<SearchScrollRequest, SearchResponse>> lastScroll = new AtomicReference<>();
 
         private int bulksToReject = 0;
+        private int searchesToReject = 0;
+        private int scrollsToReject = 0;
 
         public MyMockClient(Client in) {
             super(in);
@@ -661,7 +746,19 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
                 listener.onResponse(null);
                 return;
             }
+            if (request instanceof SearchRequest) {
+                if (searchAttempts.incrementAndGet() <= searchesToReject) {
+                    listener.onFailure(wrappedRejectedException());
+                    return;
+                }
+                lastSearch.set(new RequestAndListener<>((SearchRequest) request, (ActionListener<SearchResponse>) listener));
+                return;
+            }
             if (request instanceof SearchScrollRequest) {
+                if (scrollAttempts.incrementAndGet() <= scrollsToReject) {
+                    listener.onFailure(wrappedRejectedException());
+                    return;
+                }
                 lastScroll.set(new RequestAndListener<>((SearchScrollRequest) request, (ActionListener<SearchResponse>) listener));
                 return;
             }
@@ -715,6 +812,25 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
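
The mock client's pattern above, bump an attempt counter and fail the first N attempts before succeeding, is easy to lift out on its own. A stand-alone sketch with illustrative names:

    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.atomic.AtomicInteger;

    // Stand-alone version of the mock's rejection bookkeeping: every call
    // bumps the counter and the first `toReject` calls fail. Illustrative only.
    public class RejectingStub {
        private final AtomicInteger attempts = new AtomicInteger();
        private final int toReject;

        public RejectingStub(int toReject) {
            this.toReject = toReject;
        }

        public String call() {
            if (attempts.incrementAndGet() <= toReject) {
                throw new RejectedExecutionException("simulated rejection");
            }
            return "ok";
        }
    }
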
             }
             super.doExecute(action, request, listener);
         }
+
+        private Throwable wrappedRejectedException() {
+            Exception e = new EsRejectedExecutionException();
+            int wraps = randomIntBetween(0, 4);
+            for (int i = 0; i < wraps; i++) {
+                switch (randomIntBetween(0, 2)) {
+                case 0:
+                    e = new SearchPhaseExecutionException("test", "test failure", e, new ShardSearchFailure[0]);
+                    continue;
+                case 1:
+                    e = new ReduceSearchPhaseException("test", "test failure", e, new ShardSearchFailure[0]);
+                    continue;
+                case 2:
+                    e = new ElasticsearchException(e);
+                    continue;
+                }
+            }
+            return e;
+        }
     }
 
     private static class RequestAndListener<Request extends ActionRequest<Request>, Response> {
@@ -127,25 +127,28 @@ public class BulkByScrollTaskTests extends ESTestCase {
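
wrappedRejectedException buries the EsRejectedExecutionException under zero to four wrapper exceptions, so the retry code has to walk the cause chain to recognize a rejection. A plain-Java sketch of that unwrapping, using the JDK's RejectedExecutionException as a stand-in for the Elasticsearch type:

    import java.util.concurrent.RejectedExecutionException;

    // Cause-chain walk the retry logic needs; the JDK exception stands in
    // for EsRejectedExecutionException.
    public class UnwrapSketch {
        public static boolean isRejection(Throwable t) {
            for (Throwable cur = t; cur != null; cur = cur.getCause()) {
                if (cur instanceof RejectedExecutionException) {
                    return true;
                }
                if (cur.getCause() == cur) {
                    break; // guard against self-referential causes
                }
            }
            return false;
        }

        public static void main(String[] args) {
            Throwable wrapped = new RuntimeException("outer",
                    new IllegalStateException("middle", new RejectedExecutionException("inner")));
            System.out.println(isRejection(wrapped)); // true
        }
    }
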
     }
 
     public void testStatusHatesNegatives() {
-        expectThrows(IllegalArgumentException.class, status(-1, 0, 0, 0, 0, 0, 0, 0));
-        expectThrows(IllegalArgumentException.class, status(0, -1, 0, 0, 0, 0, 0, 0));
-        expectThrows(IllegalArgumentException.class, status(0, 0, -1, 0, 0, 0, 0, 0));
-        expectThrows(IllegalArgumentException.class, status(0, 0, 0, -1, 0, 0, 0, 0));
-        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, -1, 0, 0, 0));
-        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, 0, -1, 0, 0));
-        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, 0, 0, -1, 0));
-        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, 0, 0, 0, -1));
+        checkStatusNegatives(-1, 0, 0, 0, 0, 0, 0, 0, 0, "total");
+        checkStatusNegatives(0, -1, 0, 0, 0, 0, 0, 0, 0, "updated");
+        checkStatusNegatives(0, 0, -1, 0, 0, 0, 0, 0, 0, "created");
+        checkStatusNegatives(0, 0, 0, -1, 0, 0, 0, 0, 0, "deleted");
+        checkStatusNegatives(0, 0, 0, 0, -1, 0, 0, 0, 0, "batches");
+        checkStatusNegatives(0, 0, 0, 0, 0, -1, 0, 0, 0, "versionConflicts");
+        checkStatusNegatives(0, 0, 0, 0, 0, 0, -1, 0, 0, "noops");
+        checkStatusNegatives(0, 0, 0, 0, 0, 0, 0, -1, 0, "bulkRetries");
+        checkStatusNegatives(0, 0, 0, 0, 0, 0, 0, 0, -1, "searchRetries");
     }
 
     /**
      * Build a task status with only some values. Used for testing negative values.
      */
-    private ThrowingRunnable status(long total, long updated, long created, long deleted, int batches, long versionConflicts,
-            long noops, long retries) {
+    private void checkStatusNegatives(long total, long updated, long created, long deleted, int batches, long versionConflicts,
+            long noops, long bulkRetries, long searchRetries, String fieldName) {
         TimeValue throttle = parseTimeValue(randomPositiveTimeValue(), "test");
         TimeValue throttledUntil = parseTimeValue(randomPositiveTimeValue(), "test");
 
-        return () -> new BulkByScrollTask.Status(-1, 0, 0, 0, 0, 0, 0, 0, throttle, 0f, null, throttledUntil);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(total, updated, created,
+                deleted, batches, versionConflicts, noops, bulkRetries, searchRetries, throttle, 0f, null, throttledUntil));
+        assertEquals(e.getMessage(), fieldName + " must be greater than 0 but was [-1]");
     }
 
     /**
@@ -118,7 +118,7 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher<BulkIndexB
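
checkStatusNegatives expects each field to be validated individually, with the failing field named in the exception message. A sketch of that validation pattern; the helper name is hypothetical, the message format is copied from the assertion above:

    // Field-by-field validation pattern the test expects; requireNonNegative
    // is an invented helper, not necessarily what BulkByScrollTask.Status uses.
    public class StatusValidationSketch {
        static long requireNonNegative(long value, String name) {
            if (value < 0) {
                throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]");
            }
            return value;
        }

        public static void main(String[] args) {
            requireNonNegative(0, "noops");          // fine
            requireNonNegative(-1, "searchRetries"); // throws, naming the field
        }
    }
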
 
     @Override
     public void describeTo(Description description) {
-        description.appendText("indexed matches ").appendDescriptionOf(updatedMatcher);
+        description.appendText("updated matches ").appendDescriptionOf(updatedMatcher);
         description.appendText(" and created matches ").appendDescriptionOf(createdMatcher);
         if (batchesMatcher != null) {
             description.appendText(" and batches matches ").appendDescriptionOf(batchesMatcher);
@@ -61,16 +61,15 @@ public class CancelTestUtils {
     private static final CyclicBarrier barrier = new CyclicBarrier(2);
 
     public static <Request extends AbstractBulkIndexByScrollRequest<Request>,
-                   Response extends BulkIndexByScrollResponse,
-                   Builder extends AbstractBulkIndexByScrollRequestBuilder<Request, Response, Builder>>
-            Response testCancel(ESIntegTestCase test, Builder request, String actionToCancel) throws Exception {
+                   Builder extends AbstractBulkIndexByScrollRequestBuilder<Request, Builder>>
+            BulkIndexByScrollResponse testCancel(ESIntegTestCase test, Builder request, String actionToCancel) throws Exception {
 
         test.indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"),
                 client().prepareIndex("source", "test", "2").setSource("foo", "a"));
 
         request.source("source").script(new Script("sticky", ScriptType.INLINE, "native", emptyMap()));
         request.source().setSize(1);
-        ListenableActionFuture<Response> response = request.execute();
+        ListenableActionFuture<BulkIndexByScrollResponse> response = request.execute();
 
         // Wait until the script is on the first document.
         barrier.await(30, TimeUnit.SECONDS);
@@ -43,7 +43,7 @@ public class RethrottleTests extends ReindexTestCase {
         testCase(updateByQuery().source("test"), UpdateByQueryAction.NAME);
     }
 
-    private void testCase(AbstractBulkIndexByScrollRequestBuilder<?, ? extends BulkIndexByScrollResponse, ?> request, String actionName)
+    private void testCase(AbstractBulkIndexByScrollRequestBuilder<?, ?> request, String actionName)
             throws Exception {
         // Use a single shard so the reindex has to happen in multiple batches
         client().admin().indices().prepareCreate("test").setSettings("index.number_of_shards", 1).get();
@@ -0,0 +1,155 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.ListenableActionFuture;
+import org.elasticsearch.action.bulk.BackoffPolicy;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.bulk.Retry;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.search.MockSearchService;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.function.IntFunction;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Integration test for retry behavior. Useful because retrying relies on the way that the rest of Elasticsearch throws exceptions and unit
+ * tests won't verify that.
+ */
+public class RetryTests extends ReindexTestCase {
+    /**
+     * The number of concurrent requests to test.
+     */
+    private static final int CONCURRENT = 12;
+    /**
+     * Enough docs that the requests will likely step on each other.
+     */
+    private static final int DOC_COUNT = 200;
+
+    /**
+     * Lower the queue sizes to be small enough that both bulk and searches will time out and have to be retried.
+     */
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal));
+        settings.put("threadpool.bulk.queue_size", 1);
+        settings.put("threadpool.bulk.size", 1);
+        settings.put("threadpool.search.queue_size", 1);
+        settings.put("threadpool.search.size", 1);
+        return settings.build();
+    }
+
+    /**
+     * Disable search context leak detection because we expect leaks when there is an {@link EsRejectedExecutionException} queueing the
+     * reduce phase.
+     */
+    @Override
+    protected Collection<Class<? extends Plugin>> getMockPlugins() {
+        List<Class<? extends Plugin>> mockPlugins = new ArrayList<>();
+        for (Class<? extends Plugin> plugin: super.getMockPlugins()) {
+            if (plugin.equals(MockSearchService.TestPlugin.class)) {
+                continue;
+            }
+            mockPlugins.add(plugin);
+        }
+        return mockPlugins;
+    }
+
+    public void testReindex() throws Exception {
+        setupSourceIndex("source");
+        testCase(true, i -> reindex().source("source").destination("dest" + i));
+    }
+
+    public void testUpdateByQuery() throws Exception {
+        for (int i = 0; i < CONCURRENT; i++) {
+            setupSourceIndex("source" + i);
+        }
+        testCase(false, i -> updateByQuery().source("source" + i));
+    }
+
+    private void testCase(boolean expectCreated, IntFunction<AbstractBulkIndexByScrollRequestBuilder<?, ?>> requestBuilder)
+            throws Exception {
+        List<ListenableActionFuture<BulkIndexByScrollResponse>> futures = new ArrayList<>(CONCURRENT);
+        for (int i = 0; i < CONCURRENT; i++) {
+            AbstractBulkIndexByScrollRequestBuilder<?, ?> request = requestBuilder.apply(i);
+            // Make sure we use more than one batch so we get the full reindex behavior
+            request.source().setSize(DOC_COUNT / randomIntBetween(2, 10));
+            // Use a low, random initial wait so we are unlikely to collide with others retrying.
+            request.setRetryBackoffInitialTime(timeValueMillis(randomIntBetween(10, 300)));
+            futures.add(request.execute());
+        }
+
+        // Finish all the requests
+        List<BulkIndexByScrollResponse> responses = new ArrayList<>(CONCURRENT);
+        for (ListenableActionFuture<BulkIndexByScrollResponse> future : futures) {
+            responses.add(future.get());
+        }
+
+        // Now check them
+        long bulkRetries = 0;
+        long searchRetries = 0;
+        BulkIndexByScrollResponseMatcher matcher = matcher();
+        if (expectCreated) {
+            matcher.created(DOC_COUNT);
+        } else {
+            matcher.updated(DOC_COUNT);
+        }
+        for (BulkIndexByScrollResponse response : responses) {
+            assertThat(response, matcher);
+            bulkRetries += response.getBulkRetries();
+            searchRetries += response.getSearchRetries();
+        }
+
+        // We expect at least one retry or this test isn't very useful
+        assertThat(bulkRetries, greaterThan(0L));
+        assertThat(searchRetries, greaterThan(0L));
+    }
+
+    private void setupSourceIndex(String name) {
+        try {
+            // Build the test index with a single shard so we can be sure that a search request *can* complete with the one thread
+            assertAcked(client().admin().indices().prepareCreate(name).setSettings(
+                    "index.number_of_shards", 1,
+                    "index.number_of_replicas", 0).get());
+            waitForRelocation(ClusterHealthStatus.GREEN);
+            // Build the test data. Don't use indexRandom because that won't work consistently with such small thread pools.
+            BulkRequestBuilder bulk = client().prepareBulk();
+            for (int i = 0; i < DOC_COUNT; i++) {
+                bulk.add(client().prepareIndex(name, "test").setSource("foo", "bar " + i));
+            }
+            Retry retry = Retry.on(EsRejectedExecutionException.class).policy(BackoffPolicy.exponentialBackoff());
+            BulkResponse response = retry.withSyncBackoff(client(), bulk.request());
+            assertFalse(response.buildFailureMessage(), response.hasFailures());
+            refresh(name);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
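
RetryTests#setupSourceIndex uses Retry.on(...).policy(...).withSyncBackoff(...) to keep indexing the fixture data despite the tiny thread pools. The general shape of such a synchronous retry-with-backoff helper, sketched independently of the Elasticsearch API:

    import java.util.Iterator;
    import java.util.concurrent.Callable;

    // Generic retry-with-backoff shape; not the Elasticsearch Retry API.
    public class SyncRetrySketch {
        static <T> T withSyncBackoff(Callable<T> call, Iterator<Long> delaysMillis,
                Class<? extends Exception> retryOn) throws Exception {
            while (true) {
                try {
                    return call.call();
                } catch (Exception e) {
                    if (!retryOn.isInstance(e) || !delaysMillis.hasNext()) {
                        throw e; // not retryable, or out of retries
                    }
                    Thread.sleep(delaysMillis.next()); // back off before trying again
                }
            }
        }
    }
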
@@ -138,7 +138,7 @@ public class RoundTripTests extends ESTestCase {
 
     private BulkByScrollTask.Status randomStatus() {
         return new BulkByScrollTask.Status(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
-                randomInt(Integer.MAX_VALUE), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
+                randomInt(Integer.MAX_VALUE), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                 parseTimeValue(randomPositiveTimeValue(), "test"), abs(random().nextFloat()),
                 random().nextBoolean() ? null : randomSimpleString(random()), parseTimeValue(randomPositiveTimeValue(), "test"));
     }
@@ -210,7 +210,8 @@ public class RoundTripTests extends ESTestCase {
         assertEquals(expected.getBatches(), actual.getBatches());
         assertEquals(expected.getVersionConflicts(), actual.getVersionConflicts());
         assertEquals(expected.getNoops(), actual.getNoops());
-        assertEquals(expected.getRetries(), actual.getRetries());
+        assertEquals(expected.getBulkRetries(), actual.getBulkRetries());
+        assertEquals(expected.getSearchRetries(), actual.getSearchRetries());
         assertEquals(expected.getThrottled(), actual.getThrottled());
         assertEquals(expected.getRequestsPerSecond(), actual.getRequestsPerSecond(), 0f);
         assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled());
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.index.reindex;
 
-import java.util.List;
-
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.test.ESTestCase;
 
+import java.util.List;
+
 import static org.apache.lucene.util.TestUtil.randomSimpleString;
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.hasSize;
@@ -0,0 +1,35 @@
+---
+"Test sort Processor":
+  - do:
+      ingest.put_pipeline:
+        id: "my_pipeline"
+        body: >
+          {
+            "description": "_description",
+            "processors": [
+              {
+                "sort" : {
+                  "field" : "values"
+                }
+              }
+            ]
+          }
+  - match: { acknowledged: true }
+
+  - do:
+      index:
+        index: test
+        type: test
+        id: 1
+        pipeline: "my_pipeline"
+        body: >
+          {
+            "values": ["foo", "bar", "baz"]
+          }
+
+  - do:
+      get:
+        index: test
+        type: test
+        id: 1
+  - match: { _source.values: ["bar", "baz", "foo"] }
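
The REST test above expects the sort processor to reorder the "values" array ascending. What that amounts to, sketched in plain Java on the document's _source map (illustrative, not the processor's implementation):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Plain-Java picture of what the sort processor does to the document in
    // the test: ["foo", "bar", "baz"] becomes ["bar", "baz", "foo"].
    public class SortProcessorSketch {
        public static void main(String[] args) {
            Map<String, Object> source = new HashMap<>();
            source.put("values", new ArrayList<>(Arrays.asList("foo", "bar", "baz")));
            @SuppressWarnings("unchecked")
            List<String> values = (List<String>) source.get("values");
            Collections.sort(values); // ascending, matching the expected order
            System.out.println(source); // {values=[bar, baz, foo]}
        }
    }
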