Merge branch 'master' into feature/rank-eval

commit e8e65c3a1e

@@ -347,7 +347,6 @@ These are the linux flavors the Vagrantfile currently supports:
* ubuntu-1204 aka precise
* ubuntu-1404 aka trusty
* ubuntu-1504 aka vivid
* ubuntu-1604 aka xenial
* debian-8 aka jessie, the current debian stable distribution
* centos-6
@@ -30,13 +30,6 @@ Vagrant.configure(2) do |config|
config.vm.box = "elastic/ubuntu-14.04-x86_64"
ubuntu_common config
end
config.vm.define "ubuntu-1504" do |config|
config.vm.box = "elastic/ubuntu-15.04-x86_64"
ubuntu_common config, extra: <<-SHELL
# Install Jayatana so we can work around it being present.
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
SHELL
end
config.vm.define "ubuntu-1604" do |config|
config.vm.box = "elastic/ubuntu-16.04-x86_64"
ubuntu_common config, extra: <<-SHELL
@@ -16,6 +16,7 @@ public class RunTask extends DefaultTask {
clusterConfig.httpPort = 9200
clusterConfig.transportPort = 9300
clusterConfig.daemonize = false
clusterConfig.distribution = 'zip'
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
}
@@ -52,7 +52,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;

import java.io.IOException;

@@ -72,8 +71,7 @@ import java.util.Set;
* single thread and controlled by the {@link ClusterService}. After every update the
* {@link Discovery#publish} method publishes new version of the cluster state to all other nodes in the
* cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on
* the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish}
* method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
* the type of discovery. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
* publishing mechanism can be overridden by other discovery.
* <p>
* The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state
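For context on the javadoc hunk above: publishing is diff-based. The deleted LocalDiscovery class later in this commit shows the round trip concretely; a minimal sketch of that diff/apply cycle, using only calls that appear in that file (variable names here are illustrative), is:

    // Sender: serialize a diff between the new state and the previously published state.
    ClusterState newState = clusterChangedEvent.state();
    Diff diff = newState.diff(clusterChangedEvent.previousState());
    BytesStreamOutput out = new BytesStreamOutput();
    diff.writeTo(out);
    byte[] diffBytes = BytesReference.toBytes(out.bytes());

    // Receiver: apply the diff on top of the last state it processed.
    ClusterState applied = lastProcessedClusterState
            .readDiffFrom(StreamInput.wrap(diffBytes))
            .apply(lastProcessedClusterState);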
@@ -579,6 +579,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {

private Set<String> innerResolve(Context context, List<String> expressions, IndicesOptions options, MetaData metaData) {
Set<String> result = null;
boolean wildcardSeen = false;
for (int i = 0; i < expressions.size(); i++) {
String expression = expressions.get(i);
if (aliasOrIndexExists(metaData, expression)) {

@@ -598,13 +599,14 @@ public class IndexNameExpressionResolver extends AbstractComponent {
}
expression = expression.substring(1);
} else if (expression.charAt(0) == '-') {
// if its the first, fill it with all the indices...
if (i == 0) {
List<String> concreteIndices = resolveEmptyOrTrivialWildcard(options, metaData, false);
result = new HashSet<>(concreteIndices);
// if there is a negation without a wildcard being previously seen, add it verbatim,
// otherwise return the expression
if (wildcardSeen) {
add = false;
expression = expression.substring(1);
} else {
add = true;
}
add = false;
expression = expression.substring(1);
}
if (result == null) {
// add all the previous ones...

@@ -634,6 +636,10 @@ public class IndexNameExpressionResolver extends AbstractComponent {
if (!noIndicesAllowedOrMatches(options, matches)) {
throw infe(expression);
}

if (Regex.isSimpleMatchPattern(expression)) {
wildcardSeen = true;
}
}
return result;
}
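For context on the wildcard hunks above: after this change a leading '-' only acts as an exclusion once a wildcard has already been seen earlier in the expression list; otherwise the expression is kept verbatim as a concrete index name. A hypothetical illustration (index names are made up):

    // Assuming indices "test1" and "test2" exist:
    // ["test*", "-test2"]  resolves to  {"test1"}   // a wildcard was seen first, so "-test2" excludes
    // ["-test2"]           resolves to  {"-test2"}  // no wildcard seen yet, so it is kept verbatim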
@@ -54,7 +54,8 @@ public class MaxRetryAllocationDecider extends AllocationDecider {

@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
final Decision decision;
if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) {
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings());

@@ -62,16 +63,21 @@ public class MaxRetryAllocationDecider extends AllocationDecider {
// if we are called via the _reroute API we ignore the failure counter and try to allocate
// this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is
// enough to manually retry.
return allocation.decision(Decision.YES, NAME, "shard has already failed allocating ["
decision = allocation.decision(Decision.YES, NAME, "shard has already failed allocating ["
+ unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
+ unassignedInfo.toString() + " - retrying once on manual allocation");
} else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) {
return allocation.decision(Decision.NO, NAME, "shard has already failed allocating ["
decision = allocation.decision(Decision.NO, NAME, "shard has already failed allocating ["
+ unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
+ unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry");
} else {
decision = allocation.decision(Decision.YES, NAME, "shard has already failed allocating ["
+ unassignedInfo.getNumFailedAllocations() + "] times but [" + maxRetry + "] retries are allowed");
}
} else {
decision = allocation.decision(Decision.YES, NAME, "shard has no previous failures");
}
return allocation.decision(Decision.YES, NAME, "shard has no previous failures");
return decision;
}

@Override
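For context on the MaxRetryAllocationDecider hunks above: once a shard has failed allocation more often than the configured maximum (SETTING_ALLOCATION_MAX_RETRY in the hunk), it stays unassigned until an operator retries manually. A rough sketch of that manual retry, assuming the 5.x admin-client reroute builder exposes setRetryFailed (that builder is not part of this diff):

    // Hypothetical manual retry; the REST form appears in the decider's NO message:
    // POST /_cluster/reroute?retry_failed=true
    client().admin().cluster().prepareReroute()
            .setRetryFailed(true)   // reset the failed-allocation counter the decider checks
            .get();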
@@ -200,6 +200,10 @@ public abstract class ExtensionPoint {
allocationMultibinder.addBinding().to(clazz);
}
}

public boolean isEmpty() {
return extensions.isEmpty();
}
}

/**
@@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.ping.ZenPing;

@@ -59,11 +58,9 @@ public class DiscoveryModule extends AbstractModule {

public DiscoveryModule(Settings settings) {
this.settings = settings;
addDiscoveryType("local", LocalDiscovery.class);
addDiscoveryType("none", NoneDiscovery.class);
addDiscoveryType("zen", ZenDiscovery.class);
addElectMasterService("zen", ElectMasterService.class);
// always add the unicast hosts, or things get angry!
addZenPing(UnicastZenPing.class);
}

/**

@@ -113,7 +110,7 @@ public class DiscoveryModule extends AbstractModule {
throw new IllegalArgumentException("Unknown Discovery type [" + discoveryType + "]");
}

if (discoveryType.equals("local") == false) {
if (discoveryType.equals("none") == false) {
String masterServiceTypeKey = ZEN_MASTER_SERVICE_TYPE_SETTING.get(settings);
final Class<? extends ElectMasterService> masterService = masterServiceType.get(masterServiceTypeKey);
if (masterService == null) {

@@ -130,6 +127,9 @@ public class DiscoveryModule extends AbstractModule {
unicastHostProviders.getOrDefault(discoveryType, Collections.emptyList())) {
unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider);
}
if (zenPings.isEmpty()) {
zenPings.registerExtension(UnicastZenPing.class);
}
zenPings.bind(binder());
}
bind(Discovery.class).to(discoveryClass).asEagerSingleton();
@@ -0,0 +1,102 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.discovery;

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.ElectMasterService;

/**
* A {@link Discovery} implementation that is used by {@link org.elasticsearch.tribe.TribeService}. This implementation
* doesn't support any clustering features. Most notably {@link #startInitialJoin()} does nothing and
* {@link #publish(ClusterChangedEvent, AckListener)} is not supported.
*/
public class NoneDiscovery extends AbstractLifecycleComponent implements Discovery {

private final ClusterService clusterService;
private final DiscoverySettings discoverySettings;

@Inject
public NoneDiscovery(Settings settings, ClusterService clusterService, ClusterSettings clusterSettings) {
super(settings);
this.clusterService = clusterService;
this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
}

@Override
public DiscoveryNode localNode() {
return clusterService.localNode();
}

@Override
public String nodeDescription() {
return clusterService.getClusterName().value() + "/" + clusterService.localNode().getId();
}

@Override
public void setAllocationService(AllocationService allocationService) {

}

@Override
public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) {
throw new UnsupportedOperationException();
}

@Override
public DiscoveryStats stats() {
return null;
}

@Override
public DiscoverySettings getDiscoverySettings() {
return discoverySettings;
}

@Override
public void startInitialJoin() {

}

@Override
public int getMinimumMasterNodes() {
return ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
}

@Override
protected void doStart() {

}

@Override
protected void doStop() {

}

@Override
protected void doClose() {

}
}
@@ -1,422 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.discovery.local;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.IncompatibleClusterStateVersionException;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;

import java.util.HashSet;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;

import static org.elasticsearch.cluster.ClusterState.Builder;

public class LocalDiscovery extends AbstractLifecycleComponent implements Discovery {

private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0];

private final ClusterService clusterService;
private AllocationService allocationService;
private final ClusterName clusterName;

private final DiscoverySettings discoverySettings;

private volatile boolean master = false;

private static final ConcurrentMap<ClusterName, ClusterGroup> clusterGroups = ConcurrentCollections.newConcurrentMap();

private volatile ClusterState lastProcessedClusterState;

@Inject
public LocalDiscovery(Settings settings, ClusterService clusterService, ClusterSettings clusterSettings) {
super(settings);
this.clusterName = clusterService.getClusterName();
this.clusterService = clusterService;
this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
}

@Override
public void setAllocationService(AllocationService allocationService) {
this.allocationService = allocationService;
}

@Override
protected void doStart() {

}

@Override
public void startInitialJoin() {
synchronized (clusterGroups) {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {
clusterGroup = new ClusterGroup();
clusterGroups.put(clusterName, clusterGroup);
}
logger.debug("Connected to cluster [{}]", clusterName);

Optional<LocalDiscovery> current = clusterGroup.members().stream().filter(other -> (
other.localNode().equals(this.localNode()) || other.localNode().getId().equals(this.localNode().getId())
)).findFirst();
if (current.isPresent()) {
throw new IllegalStateException("current cluster group already contains a node with the same id. current "
+ current.get().localNode() + ", this node " + localNode());
}

clusterGroup.members().add(this);

LocalDiscovery firstMaster = null;
for (LocalDiscovery localDiscovery : clusterGroup.members()) {
if (localDiscovery.localNode().isMasterNode()) {
firstMaster = localDiscovery;
break;
}
}

if (firstMaster != null && firstMaster.equals(this)) {
// we are the first master (and the master)
master = true;
final LocalDiscovery master = firstMaster;
clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ClusterStateUpdateTask() {

@Override
public boolean runOnlyOnMaster() {
return false;
}

@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.add(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
// remove the NO_MASTER block in this case
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock());
return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build();
}

@Override
public void onFailure(String source, Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
} else if (firstMaster != null) {
// tell the master to send the fact that we are here
final LocalDiscovery master = firstMaster;
firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode() + "])", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}

@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.add(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build();
return master.allocationService.reroute(currentState, "node_add");
}

@Override
public void onFailure(String source, Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}

});
}
} // else, no master node, the next node that will start will fill things in...
}

@Override
protected void doStop() {
synchronized (clusterGroups) {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {
logger.warn("Illegal state, should not have an empty cluster group when stopping, I should be there at teh very least...");
return;
}
clusterGroup.members().remove(this);
if (clusterGroup.members().isEmpty()) {
// no more members, remove and return
clusterGroups.remove(clusterName);
return;
}

LocalDiscovery firstMaster = null;
for (LocalDiscovery localDiscovery : clusterGroup.members()) {
if (localDiscovery.localNode().isMasterNode()) {
firstMaster = localDiscovery;
break;
}
}

if (firstMaster != null) {
// if the removed node is the master, make the next one as the master
if (master) {
firstMaster.master = true;
}

final Set<String> newMembers = new HashSet<>();
for (LocalDiscovery discovery : clusterGroup.members()) {
newMembers.add(discovery.localNode().getId());
}

final LocalDiscovery master = firstMaster;
master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}

@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().getId());
DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes());
if (delta.added()) {
logger.warn("No new nodes should be created when a new discovery view is accepted");
}
// reroute here, so we eagerly remove dead nodes from the routing
ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
return master.allocationService.deassociateDeadNodes(updatedState, true, "node stopped");
}

@Override
public void onFailure(String source, Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
}
}
}

@Override
protected void doClose() {
}

@Override
public DiscoveryNode localNode() {
return clusterService.localNode();
}

@Override
public String nodeDescription() {
return clusterName.value() + "/" + localNode().getId();
}

@Override
public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) {
if (!master) {
throw new IllegalStateException("Shouldn't publish state when not master");
}
LocalDiscovery[] members = members();
if (members.length > 0) {
Set<DiscoveryNode> nodesToPublishTo = new HashSet<>(members.length);
for (LocalDiscovery localDiscovery : members) {
if (localDiscovery.master) {
continue;
}
nodesToPublishTo.add(localDiscovery.localNode());
}
publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener));
}
}

@Override
public DiscoveryStats stats() {
return new DiscoveryStats((PendingClusterStateStats)null);
}

@Override
public DiscoverySettings getDiscoverySettings() {
return discoverySettings;
}

@Override
public int getMinimumMasterNodes() {
return -1;
}

private LocalDiscovery[] members() {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {
return NO_MEMBERS;
}
Queue<LocalDiscovery> members = clusterGroup.members();
return members.toArray(new LocalDiscovery[members.size()]);
}

private void publish(LocalDiscovery[] members, ClusterChangedEvent clusterChangedEvent, final BlockingClusterStatePublishResponseHandler publishResponseHandler) {

try {
// we do the marshaling intentionally, to check it works well...
byte[] clusterStateBytes = null;
byte[] clusterStateDiffBytes = null;

ClusterState clusterState = clusterChangedEvent.state();
for (final LocalDiscovery discovery : members) {
if (discovery.master) {
continue;
}
ClusterState newNodeSpecificClusterState = null;
synchronized (this) {
// we do the marshaling intentionally, to check it works well...
// check if we published cluster state at least once and node was in the cluster when we published cluster state the last time
if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode())) {
// both conditions are true - which means we can try sending cluster state as diffs
if (clusterStateDiffBytes == null) {
Diff diff = clusterState.diff(clusterChangedEvent.previousState());
BytesStreamOutput os = new BytesStreamOutput();
diff.writeTo(os);
clusterStateDiffBytes = BytesReference.toBytes(os.bytes());
}
try {
newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName());
} catch (IncompatibleClusterStateVersionException ex) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex);
}
}
if (newNodeSpecificClusterState == null) {
if (clusterStateBytes == null) {
clusterStateBytes = Builder.toBytes(clusterState);
}
newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode());
}
discovery.lastProcessedClusterState = newNodeSpecificClusterState;
}
final ClusterState nodeSpecificClusterState = newNodeSpecificClusterState;

nodeSpecificClusterState.status(ClusterState.ClusterStateStatus.RECEIVED);
// ignore cluster state messages that do not include "me", not in the game yet...
if (nodeSpecificClusterState.nodes().getLocalNode() != null) {
assert nodeSpecificClusterState.nodes().getMasterNode() != null : "received a cluster state without a master";
assert !nodeSpecificClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block";

discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}

@Override
public ClusterState execute(ClusterState currentState) {
if (currentState.supersedes(nodeSpecificClusterState)) {
return currentState;
}

if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) {
// its a fresh update from the master as we transition from a start of not having a master to having one
logger.debug("got first state from fresh master [{}]", nodeSpecificClusterState.nodes().getMasterNodeId());
return nodeSpecificClusterState;
}

ClusterState.Builder builder = ClusterState.builder(nodeSpecificClusterState);
// if the routing table did not change, use the original one
if (nodeSpecificClusterState.routingTable().version() == currentState.routingTable().version()) {
builder.routingTable(currentState.routingTable());
}
if (nodeSpecificClusterState.metaData().version() == currentState.metaData().version()) {
builder.metaData(currentState.metaData());
}

return builder.build();
}

@Override
public void onFailure(String source, Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
publishResponseHandler.onFailure(discovery.localNode(), e);
}

@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
publishResponseHandler.onResponse(discovery.localNode());
}
});
} else {
publishResponseHandler.onResponse(discovery.localNode());
}
}

TimeValue publishTimeout = discoverySettings.getPublishTimeout();
if (publishTimeout.millis() > 0) {
try {
boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout);
if (!awaited) {
DiscoveryNode[] pendingNodes = publishResponseHandler.pendingNodes();
// everyone may have just responded
if (pendingNodes.length > 0) {
logger.warn("timed out waiting for all nodes to process published state [{}] (timeout [{}], pending nodes: {})", clusterState.version(), publishTimeout, pendingNodes);
}
}
} catch (InterruptedException e) {
// ignore & restore interrupt
Thread.currentThread().interrupt();
}
}

} catch (Exception e) {
// failure to marshal or un-marshal
throw new IllegalStateException("Cluster state failed to serialize", e);
}
}

private class ClusterGroup {

private Queue<LocalDiscovery> members = ConcurrentCollections.newQueue();

Queue<LocalDiscovery> members() {
return members;
}
}
}
@@ -100,8 +100,10 @@ public class PublishClusterStateAction extends AbstractComponent {
this.discoverySettings = discoverySettings;
this.clusterName = clusterName;
this.pendingStatesQueue = new PendingClusterStatesQueue(logger, settings.getAsInt(SETTINGS_MAX_PENDING_CLUSTER_STATES, 25));
transportService.registerRequestHandler(SEND_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.SAME, new SendClusterStateRequestHandler());
transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, new CommitClusterStateRequestHandler());
transportService.registerRequestHandler(SEND_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.SAME, false, false,
new SendClusterStateRequestHandler());
transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, false, false,
new CommitClusterStateRequestHandler());
}

public PendingClusterStatesQueue pendingStatesQueue() {
@@ -291,4 +291,38 @@ public interface ScriptDocValues<T> extends List<T> {
return geohashDistance(geohash);
}
}

final class Booleans extends AbstractList<Boolean> implements ScriptDocValues<Boolean> {

private final SortedNumericDocValues values;

public Booleans(SortedNumericDocValues values) {
this.values = values;
}

@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
}

@Override
public List<Boolean> getValues() {
return this;
}

public boolean getValue() {
return values.count() != 0 && values.valueAt(0) == 1;
}

@Override
public Boolean get(int index) {
return values.valueAt(index) == 1;
}

@Override
public int size() {
return values.count();
}

}
}
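For orientation: the new ScriptDocValues.Booleans variant above exposes 0/1 numeric doc values as booleans. A minimal usage sketch built only from the methods added in the hunk (the values source and doc id are placeholders):

    SortedNumericDocValues values = ...; // per-segment doc values for a boolean field
    ScriptDocValues.Booleans bools = new ScriptDocValues.Booleans(values);
    bools.setNextDocId(docId);          // position on the current document
    boolean first = bools.getValue();   // false when the document has no value
    int count = bools.size();           // number of values for this document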
@@ -19,28 +19,24 @@

package org.elasticsearch.index.fielddata.plain;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;

import java.util.Collection;
import java.util.Collections;


/**
* Specialization of {@link AtomicNumericFieldData} for integers.
*/
abstract class AtomicLongFieldData implements AtomicNumericFieldData {

private final long ramBytesUsed;
/** True if this numeric data is for a boolean field, and so only has values 0 and 1. */
private final boolean isBoolean;

AtomicLongFieldData(long ramBytesUsed) {
AtomicLongFieldData(long ramBytesUsed, boolean isBoolean) {
this.ramBytesUsed = ramBytesUsed;
this.isBoolean = isBoolean;
}

@Override

@@ -50,7 +46,11 @@ abstract class AtomicLongFieldData implements AtomicNumericFieldData {

@Override
public final ScriptDocValues getScriptValues() {
return new ScriptDocValues.Longs(getLongValues());
if (isBoolean) {
return new ScriptDocValues.Booleans(getLongValues());
} else {
return new ScriptDocValues.Longs(getLongValues());
}
}

@Override

@@ -63,24 +63,6 @@ abstract class AtomicLongFieldData implements AtomicNumericFieldData {
return FieldData.castToDouble(getLongValues());
}

public static AtomicNumericFieldData empty(final int maxDoc) {
return new AtomicLongFieldData(0) {

@Override
public SortedNumericDocValues getLongValues() {
return DocValues.emptySortedNumeric(maxDoc);
}

@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}

};
}

@Override
public void close() {
}

public void close() {}
}
@@ -96,7 +96,7 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple
case DOUBLE:
return new SortedNumericDoubleFieldData(reader, field);
default:
return new SortedNumericLongFieldData(reader, field);
return new SortedNumericLongFieldData(reader, field, numericType == NumericType.BOOLEAN);
}
}

@@ -117,8 +117,8 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple
final LeafReader reader;
final String field;

SortedNumericLongFieldData(LeafReader reader, String field) {
super(0L);
SortedNumericLongFieldData(LeafReader reader, String field, boolean isBoolean) {
super(0L, isBoolean);
this.reader = reader;
this.field = field;
}
@@ -269,6 +269,7 @@ class InstallPluginCommand extends SettingCommand {
URL url = new URL(urlString);
Path zip = Files.createTempFile(tmpDir, null, ".zip");
URLConnection urlConnection = url.openConnection();
urlConnection.addRequestProperty("User-Agent", "elasticsearch-plugin-installer");
int contentLength = urlConnection.getContentLength();
try (InputStream in = new TerminalProgressInputStream(urlConnection.getInputStream(), contentLength, terminal)) {
// must overwrite since creating the temp file above actually created the file
@@ -114,14 +114,14 @@ public class TransportService extends AbstractLifecycleComponent {
private final Logger tracerLog;

volatile String[] tracerLogInclude;
volatile String[] tracelLogExclude;
volatile String[] tracerLogExclude;

/** if set will call requests sent to this id to shortcut and executed locally */
volatile DiscoveryNode localNode = null;

/**
* Build the service.
*
*
* @param clusterSettings if non null the the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings
* updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}.
*/

@@ -173,8 +173,8 @@ public class TransportService extends AbstractLifecycleComponent {
this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY);
}

void setTracerLogExclude(List<String> tracelLogExclude) {
this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY);
void setTracerLogExclude(List<String> tracerLogExclude) {
this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY);
}

@Override

@@ -589,8 +589,8 @@ public class TransportService extends AbstractLifecycleComponent {
return false;
}
}
if (tracelLogExclude.length > 0) {
return !Regex.simpleMatch(tracelLogExclude, action);
if (tracerLogExclude.length > 0) {
return !Regex.simpleMatch(tracerLogExclude, action);
}
return true;
}
@@ -124,7 +124,7 @@ public class TribeService extends AbstractLifecycleComponent {
if (!NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.exists(settings)) {
sb.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), nodesSettings.size());
}
sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery
sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "none"); // a tribe node should not use zen discovery
// nothing is going to be discovered, since no master will be elected
sb.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
if (sb.get("cluster.name") == null) {
@@ -97,40 +97,23 @@ public class VersionTests extends ESTestCase {
}

public void testTooLongVersionFromString() {
try {
Version.fromString("1.0.0.1.3");
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision"));
}
Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("1.0.0.1.3"));
assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision"));
}

public void testTooShortVersionFromString() {
try {
Version.fromString("1.0");
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision"));
}

Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("1.0"));
assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision"));
}

public void testWrongVersionFromString() {
try {
Version.fromString("WRONG.VERSION");
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision"));
}
Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("WRONG.VERSION"));
assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision"));
}

public void testVersionNoPresentInSettings() {
try {
Version.indexCreated(Settings.builder().build());
fail("Expected IllegalArgumentException");
} catch (IllegalStateException e) {
assertThat(e.getMessage(), containsString("[index.version.created] is not present"));
}
Exception e = expectThrows(IllegalStateException.class, () -> Version.indexCreated(Settings.builder().build()));
assertThat(e.getMessage(), containsString("[index.version.created] is not present"));
}

public void testIndexCreatedVersion() {
@@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.indices.template;

import org.elasticsearch.test.ESSingleNodeTestCase;

import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;

/**
* Rudimentary tests that the templates used by Logstash and Beats
* prior to their 5.x releases work for newly created indices
*/
public class BWCTemplateTests extends ESSingleNodeTestCase {
public void testBeatsTemplatesBWC() throws Exception {
String metricBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json");
String packetBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json");
String fileBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json");
String winLogBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json");
client().admin().indices().preparePutTemplate("metricbeat").setSource(metricBeat).get();
client().admin().indices().preparePutTemplate("packetbeat").setSource(packetBeat).get();
client().admin().indices().preparePutTemplate("filebeat").setSource(fileBeat).get();
client().admin().indices().preparePutTemplate("winlogbeat").setSource(winLogBeat).get();

client().prepareIndex("metricbeat-foo", "doc", "1").setSource("message", "foo").get();
client().prepareIndex("packetbeat-foo", "doc", "1").setSource("message", "foo").get();
client().prepareIndex("filebeat-foo", "doc", "1").setSource("message", "foo").get();
client().prepareIndex("winlogbeat-foo", "doc", "1").setSource("message", "foo").get();
}

public void testLogstashTemplatesBWC() throws Exception {
String ls5x = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json");
client().admin().indices().preparePutTemplate("logstash-5x").setSource(ls5x).get();
client().prepareIndex("logstash-foo", "doc", "1").setSource("message", "foo").get();
}

}
@@ -22,7 +22,6 @@ package org.elasticsearch.action.support.master;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;

@@ -46,6 +45,11 @@ import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class IndexingMasterFailoverIT extends ESIntegTestCase {

@Override
protected boolean addMockZenPings() {
return false;
}

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
final HashSet<Class<? extends Plugin>> classes = new HashSet<>(super.nodePlugins());

@@ -62,7 +66,6 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase {
logger.info("--> start 4 nodes, 3 master, 1 data");

final Settings sharedSettings = Settings.builder()
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen")
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly
.put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out
@@ -137,6 +137,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
THREAD_POOL = new TestThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName());
}

@Override
@Before
public void setUp() throws Exception {
super.setUp();

@@ -156,6 +157,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
);
}

@Override
@After
public void tearDown() throws Exception {
super.tearDown();
@@ -226,12 +226,8 @@ public class UpdateRequestTests extends ESTestCase {
// Related to issue #15822
public void testInvalidBodyThrowsParseException() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type", "1");
try {
request.fromXContent(new byte[] { (byte) '"' });
fail("Should have thrown a ElasticsearchParseException");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("Failed to derive xcontent"));
}
Exception e = expectThrows(ElasticsearchParseException.class, () -> request.fromXContent(new byte[] { (byte) '"' }));
assertThat(e.getMessage(), equalTo("Failed to derive xcontent"));
}

// Related to issue 15338
@@ -32,12 +32,13 @@ import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.discovery.MockZenPing;
import org.elasticsearch.transport.MockTcpTransportPlugin;
import org.elasticsearch.transport.MockTransportClient;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.Collections;
import java.util.Arrays;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

@@ -65,7 +66,7 @@ public class TransportClientIT extends ESIntegTestCase {
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(Node.NODE_DATA_SETTING.getKey(), false)
.put("cluster.name", "foobar")
.build(), Collections.singleton(MockTcpTransportPlugin.class)).start()) {
.build(), Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) {
TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress();
client.addTransportAddress(transportAddress);
// since we force transport clients there has to be one node started that we connect to.
@@ -28,7 +28,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;

@@ -75,15 +74,13 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
}

@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
protected boolean addMockZenPings() {
return false;
}

public void testSimpleMinimumMasterNodes() throws Exception {

Settings settings = Settings.builder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 2)
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put("discovery.initial_state_timeout", "500ms")

@@ -195,7 +192,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {

public void testMultipleNodesShutdownNonMasterNodes() throws Exception {
Settings settings = Settings.builder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 3)
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s")
.put("discovery.initial_state_timeout", "500ms")

@@ -271,7 +267,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {

public void testDynamicUpdateMinimumMasterNodes() throws Exception {
Settings settings = Settings.builder()
.put("discovery.type", "zen")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
.put("discovery.initial_state_timeout", "500ms")
.build();

@@ -329,7 +324,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
public void testCanNotBringClusterDown() throws ExecutionException, InterruptedException {
int nodeCount = scaledRandomIntBetween(1, 5);
Settings.Builder settings = Settings.builder()
.put("discovery.type", "zen")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put("discovery.initial_state_timeout", "500ms");

@@ -368,7 +362,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {

public void testCanNotPublishWithoutMinMastNodes() throws Exception {
Settings settings = Settings.builder()
.put("discovery.type", "zen")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up
@@ -22,7 +22,6 @@ package org.elasticsearch.cluster;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;

@@ -41,18 +40,9 @@ import static org.hamcrest.Matchers.nullValue;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class SpecificMasterNodesIT extends ESIntegTestCase {

@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
}
protected final Settings.Builder settingsBuilder() {
return Settings.builder().put("discovery.type", "zen");
}

public void testSimpleOnlyMasterNodeElection() throws IOException {
logger.info("--> start data node / non master node");
internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s"));
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s"));
try {
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue());
fail("should not be able to find master");

@@ -60,7 +50,7 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
// all is well, no master elected
}
logger.info("--> start master node");
final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));

@@ -75,14 +65,14 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
}

logger.info("--> start master node");
final String nextMasterEligibleNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
final String nextMasterEligibleNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName));
}

public void testElectOnlyBetweenMasterNodes() throws IOException {
logger.info("--> start data node / non master node");
internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s"));
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s"));
try {
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue());
fail("should not be able to find master");

@@ -90,12 +80,12 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
// all is well, no master elected
}
logger.info("--> start master node (1)");
final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));

logger.info("--> start master node (2)");
final String nextMasterEligableNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
final String nextMasterEligableNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));

@@ -112,10 +102,10 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
*/
public void testCustomDefaultMapping() throws Exception {
logger.info("--> start master node / non data");
internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));

logger.info("--> start data node / non master node");
internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));

createIndex("test");
assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("timestamp", "type=date"));

@@ -134,10 +124,10 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {

public void testAliasFilterValidation() throws Exception {
logger.info("--> start master node / non data");
internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));

logger.info("--> start data node / non master node");
internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));

assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"keyword\" },\"field_b\" :{ \"type\" : \"keyword\" }}}}}}"));
client().admin().indices().prepareAliases().addAlias("test", "a_test", QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"), ScoreMode.Avg)).get();
|
|
|
@@ -177,43 +177,31 @@ public class DateMathExpressionResolverTests extends ESTestCase {
    }

    public void testExpressionInvalidUnescaped() throws Exception {
-       try {
-           expressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>"));
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-           assertThat(e.getMessage(), containsString("invalid character at position ["));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class,
+           () -> expressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")));
+       assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+       assertThat(e.getMessage(), containsString("invalid character at position ["));
    }

    public void testExpressionInvalidDateMathFormat() throws Exception {
-       try {
-           expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>"));
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-           assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class,
+           () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")));
+       assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+       assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
    }

    public void testExpressionInvalidEmptyDateMathFormat() throws Exception {
-       try {
-           expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>"));
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-           assertThat(e.getMessage(), containsString("missing date format"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class,
+           () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")));
+       assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+       assertThat(e.getMessage(), containsString("missing date format"));
    }

    public void testExpressionInvalidOpenEnded() throws Exception {
-       try {
-           expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>"));
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-           assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class,
+           () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")));
+       assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+       assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
    }

}
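The hunks above all apply the same conversion: a try / fail() / catch block becomes a single expectThrows call whose return value is asserted on directly. A minimal sketch of the target idiom, lifted from one of the hunks above (the resolver call and messages are only an example, not a new change):

    // expectThrows runs the lambda, asserts that the given exception type is thrown,
    // and hands the exception back so its message can be checked without a catch block.
    Exception e = expectThrows(ElasticsearchParseException.class,
        () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")));
    assertThat(e.getMessage(), containsString("invalid dynamic name expression"));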
@@ -305,7 +305,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
        assertEquals(1, results.length);
        assertEquals("bar", results[0]);

-       results = indexNameExpressionResolver.concreteIndexNames(context, "-foo*");
+       results = indexNameExpressionResolver.concreteIndexNames(context, "*", "-foo*");
        assertEquals(1, results.length);
        assertEquals("bar", results[0]);

@@ -585,6 +585,64 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
    }

+   public void testConcreteIndicesWildcardWithNegation() {
+       MetaData.Builder mdBuilder = MetaData.builder()
+               .put(indexBuilder("testXXX").state(State.OPEN))
+               .put(indexBuilder("testXXY").state(State.OPEN))
+               .put(indexBuilder("testXYY").state(State.OPEN))
+               .put(indexBuilder("-testXYZ").state(State.OPEN))
+               .put(indexBuilder("-testXZZ").state(State.OPEN))
+               .put(indexBuilder("-testYYY").state(State.OPEN))
+               .put(indexBuilder("testYYY").state(State.OPEN))
+               .put(indexBuilder("testYYX").state(State.OPEN));
+       ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
+
+       IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state,
+               IndicesOptions.fromOptions(true, true, true, true));
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")),
+               equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "test*", "-testX*")),
+               equalTo(newHashSet("testYYY", "testYYX")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testX*")),
+               equalTo(newHashSet("-testXYZ", "-testXZZ")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXY", "-testX*")),
+               equalTo(newHashSet("testXXY", "-testXYZ", "-testXZZ")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "*", "--testX*")),
+               equalTo(newHashSet("testXXX", "testXXY", "testXYY", "testYYX", "testYYY", "-testYYY")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testXXX", "test*")),
+               equalTo(newHashSet("testYYX", "testXXX", "testXYY", "testYYY", "testXXY")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "test*", "-testXXX")),
+               equalTo(newHashSet("testYYX", "testXYY", "testYYY", "testXXY")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "+testXXX", "+testXXY", "+testYYY", "-testYYY")),
+               equalTo(newHashSet("testXXX", "testXXY", "testYYY", "-testYYY")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testYYY", "testYYX", "testX*", "-testXXX")),
+               equalTo(newHashSet("testYYY", "testYYX", "testXXY", "testXYY")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testXXX", "*testY*", "-testYYY")),
+               equalTo(newHashSet("testYYX", "testYYY", "-testYYY")));
+
+       String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "-doesnotexist");
+       assertEquals(0, indexNames.length);
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "-*")),
+               equalTo(newHashSet("-testXYZ", "-testXZZ", "-testYYY")));
+
+       assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(),
+               "+testXXX", "+testXXY", "+testXYY", "-testXXY")),
+               equalTo(newHashSet("testXXX", "testXYY", "testXXY")));
+
+       indexNames = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "*", "-*");
+       assertEquals(0, indexNames.length);
+   }
+
    /**
     * test resolving _all pattern (null, empty array or "_all") for random IndicesOptions
     */
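The new test pins down the exclusion semantics exercised above: a "-" expression only acts as an exclusion once an earlier wildcard expression has been seen; before that it is resolved like any other name or pattern, which is why indices literally named "-testXYZ" and "-testXZZ" can still be matched. Two calls taken from the assertions above illustrate the difference (the results in comments come from the test's expectations):

    // No wildcard has been seen yet, so "-testX*" resolves as an ordinary pattern:
    indexNameExpressionResolver.concreteIndexNames(context, "-testX*");          // -> -testXYZ, -testXZZ
    // After the wildcard "test*", the same expression subtracts its matches:
    indexNameExpressionResolver.concreteIndexNames(context, "test*", "-testX*"); // -> testYYY, testYYX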
@@ -50,9 +50,9 @@ public class WildcardExpressionResolverTests extends ESTestCase {
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")));
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testYYY"))), equalTo(newHashSet("testXXX", "testYYY")));
-       assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))).size(), equalTo(0));
+       assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), equalTo(newHashSet("testXXX", "-testXXX")));
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testY*"))), equalTo(newHashSet("testXXX", "testYYY")));
-       assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))).size(), equalTo(0));
+       assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), equalTo(newHashSet("testXXX")));
    }

    public void testConvertWildcardsTests() {

@@ -66,7 +66,7 @@ public class WildcardExpressionResolverTests extends ESTestCase {

        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
-       assertThat(newHashSet(resolver.resolve(context, Arrays.asList("-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+       assertThat(newHashSet(resolver.resolve(context, Arrays.asList("-kuku"))), equalTo(newHashSet("-kuku")));
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+test*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY")));
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testX*", "+testYYY"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testYYY", "+testX*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));

@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimary
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
- import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;

@@ -63,9 +62,8 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
    }

    @Override
-   protected Settings nodeSettings(int nodeOrdinal) {
-       return Settings.builder().put(super.nodeSettings(nodeOrdinal))
-           .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
+   protected boolean addMockZenPings() {
+       return false;
    }

    private void createStaleReplicaScenario() throws Exception {

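Instead of forcing the discovery type through a nodeSettings override, the test now overrides addMockZenPings(); returning false appears to keep the mocked zen ping from being installed so the real zen ping implementation is exercised. A hedged sketch of that override in an arbitrary integration test (the class name is a placeholder, not part of this change):

    // Placeholder test class showing only the override introduced by this diff.
    public class SomeDiscoveryIT extends ESIntegTestCase {
        @Override
        protected boolean addMockZenPings() {
            return false; // run this suite against the real zen ping rather than the test mock
        }
    }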
@@ -35,7 +35,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Singleton;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
- import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;

@@ -68,17 +67,8 @@ public class ClusterServiceIT extends ESIntegTestCase {
        return Arrays.asList(TestPlugin.class);
    }

-   @Override
-   protected Settings nodeSettings(int nodeOrdinal) {
-       return Settings.builder().put(super.nodeSettings(nodeOrdinal))
-           .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
-   }
-
    public void testAckedUpdateTask() throws Exception {
-       Settings settings = Settings.builder()
-           .put("discovery.type", "local")
-           .build();
-       internalCluster().startNode(settings);
+       internalCluster().startNode();
        ClusterService clusterService = internalCluster().getInstance(ClusterService.class);

        final AtomicBoolean allNodesAcked = new AtomicBoolean(false);

@@ -151,10 +141,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
    }

    public void testAckedUpdateTaskSameClusterState() throws Exception {
-       Settings settings = Settings.builder()
-           .put("discovery.type", "local")
-           .build();
-       internalCluster().startNode(settings);
+       internalCluster().startNode();
        ClusterService clusterService = internalCluster().getInstance(ClusterService.class);

        final AtomicBoolean allNodesAcked = new AtomicBoolean(false);

@@ -222,10 +209,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
    }

    public void testAckedUpdateTaskNoAckExpected() throws Exception {
-       Settings settings = Settings.builder()
-           .put("discovery.type", "local")
-           .build();
-       internalCluster().startNode(settings);
+       internalCluster().startNode();
        ClusterService clusterService = internalCluster().getInstance(ClusterService.class);

        final AtomicBoolean allNodesAcked = new AtomicBoolean(false);

@@ -294,10 +278,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
    }

    public void testAckedUpdateTaskTimeoutZero() throws Exception {
-       Settings settings = Settings.builder()
-           .put("discovery.type", "local")
-           .build();
-       internalCluster().startNode(settings);
+       internalCluster().startNode();
        ClusterService clusterService = internalCluster().getInstance(ClusterService.class);

        final AtomicBoolean allNodesAcked = new AtomicBoolean(false);

@@ -371,11 +352,8 @@ public class ClusterServiceIT extends ESIntegTestCase {

    @TestLogging("_root:debug,org.elasticsearch.action.admin.cluster.tasks:trace")
    public void testPendingUpdateTask() throws Exception {
-       Settings settings = Settings.builder()
-           .put("discovery.type", "local")
-           .build();
-       String node_0 = internalCluster().startNode(settings);
-       internalCluster().startCoordinatingOnlyNode(settings);
+       String node_0 = internalCluster().startNode();
+       internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);

        final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0);
        final CountDownLatch block1 = new CountDownLatch(1);

@@ -507,7 +485,6 @@ public class ClusterServiceIT extends ESIntegTestCase {

    public void testLocalNodeMasterListenerCallbacks() throws Exception {
        Settings settings = Settings.builder()
-           .put("discovery.type", "zen")
            .put("discovery.zen.minimum_master_nodes", 1)
            .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
            .put("discovery.initial_state_timeout", "500ms")

|
@ -28,71 +28,43 @@ import static org.hamcrest.Matchers.instanceOf;
|
|||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class TableTests extends ESTestCase {
|
||||
|
||||
public void testFailOnStartRowWithoutHeader() {
|
||||
Table table = new Table();
|
||||
try {
|
||||
table.startRow();
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage(), is("no headers added..."));
|
||||
}
|
||||
Exception e = expectThrows(IllegalStateException.class, () -> table.startRow());
|
||||
assertThat(e.getMessage(), is("no headers added..."));
|
||||
}
|
||||
|
||||
public void testFailOnEndHeadersWithoutStart() {
|
||||
Table table = new Table();
|
||||
try {
|
||||
table.endHeaders();
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage(), is("no headers added..."));
|
||||
}
|
||||
|
||||
Exception e = expectThrows(IllegalStateException.class, () -> table.endHeaders());
|
||||
assertThat(e.getMessage(), is("no headers added..."));
|
||||
}
|
||||
|
||||
public void testFailOnAddCellWithoutHeader() {
|
||||
Table table = new Table();
|
||||
try {
|
||||
table.addCell("error");
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage(), is("no block started..."));
|
||||
}
|
||||
|
||||
Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("error"));
|
||||
assertThat(e.getMessage(), is("no block started..."));
|
||||
}
|
||||
|
||||
public void testFailOnAddCellWithoutRow() {
|
||||
Table table = this.getTableWithHeaders();
|
||||
try {
|
||||
table.addCell("error");
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage(), is("no block started..."));
|
||||
}
|
||||
|
||||
Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("error"));
|
||||
assertThat(e.getMessage(), is("no block started..."));
|
||||
}
|
||||
|
||||
public void testFailOnEndRowWithoutStart() {
|
||||
Table table = this.getTableWithHeaders();
|
||||
try {
|
||||
table.endRow();
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage(), is("no row started..."));
|
||||
}
|
||||
|
||||
Exception e = expectThrows(IllegalStateException.class, () -> table.endRow());
|
||||
assertThat(e.getMessage(), is("no row started..."));
|
||||
}
|
||||
|
||||
public void testFailOnLessCellsThanDeclared() {
|
||||
Table table = this.getTableWithHeaders();
|
||||
table.startRow();
|
||||
table.addCell("foo");
|
||||
try {
|
||||
table.endRow(true);
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage(), is("mismatch on number of cells 1 in a row compared to header 2"));
|
||||
}
|
||||
|
||||
Exception e = expectThrows(IllegalStateException.class, () -> table.endRow());
|
||||
assertThat(e.getMessage(), is("mismatch on number of cells 1 in a row compared to header 2"));
|
||||
}
|
||||
|
||||
public void testOnLessCellsThanDeclaredUnchecked() {
|
||||
|
@ -107,13 +79,8 @@ public class TableTests extends ESTestCase {
|
|||
table.startRow();
|
||||
table.addCell("foo");
|
||||
table.addCell("bar");
|
||||
try {
|
||||
table.addCell("foobar");
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage(), is("can't add more cells to a row than the header"));
|
||||
}
|
||||
|
||||
Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("foobar"));
|
||||
assertThat(e.getMessage(), is("can't add more cells to a row than the header"));
|
||||
}
|
||||
|
||||
public void testSimple() {
|
||||
|
|
|
@ -19,12 +19,6 @@
|
|||
|
||||
package org.elasticsearch.common.geo;
|
||||
|
||||
import org.locationtech.spatial4j.exception.InvalidShapeException;
|
||||
import org.locationtech.spatial4j.shape.Circle;
|
||||
import org.locationtech.spatial4j.shape.Point;
|
||||
import org.locationtech.spatial4j.shape.Rectangle;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.impl.PointImpl;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
import com.vividsolutions.jts.geom.LineString;
|
||||
import com.vividsolutions.jts.geom.Polygon;
|
||||
|
@ -35,6 +29,12 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder;
|
|||
import org.elasticsearch.common.geo.builders.ShapeBuilder;
|
||||
import org.elasticsearch.common.geo.builders.ShapeBuilders;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.locationtech.spatial4j.exception.InvalidShapeException;
|
||||
import org.locationtech.spatial4j.shape.Circle;
|
||||
import org.locationtech.spatial4j.shape.Point;
|
||||
import org.locationtech.spatial4j.shape.Rectangle;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.impl.PointImpl;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiLineString;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiPolygon;
|
||||
|
@ -183,17 +183,13 @@ public class ShapeBuilderTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testPolygonSelfIntersection() {
|
||||
try {
|
||||
ShapeBuilders.newPolygon(new CoordinatesBuilder()
|
||||
PolygonBuilder newPolygon = ShapeBuilders.newPolygon(new CoordinatesBuilder()
|
||||
.coordinate(-40.0, 50.0)
|
||||
.coordinate(40.0, 50.0)
|
||||
.coordinate(-40.0, -50.0)
|
||||
.coordinate(40.0, -50.0).close())
|
||||
.build();
|
||||
fail("Expected InvalidShapeException");
|
||||
} catch (InvalidShapeException e) {
|
||||
assertThat(e.getMessage(), containsString("Self-intersection at or near point (0.0"));
|
||||
}
|
||||
.coordinate(40.0, -50.0).close());
|
||||
Exception e = expectThrows(InvalidShapeException.class, () -> newPolygon.build());
|
||||
assertThat(e.getMessage(), containsString("Self-intersection at or near point (0.0"));
|
||||
}
|
||||
|
||||
public void testGeoCircle() {
|
||||
|
@ -550,12 +546,8 @@ public class ShapeBuilderTests extends ESTestCase {
|
|||
.coordinate(179, -10)
|
||||
.coordinate(164, 0)
|
||||
));
|
||||
try {
|
||||
builder.close().build();
|
||||
fail("Expected InvalidShapeException");
|
||||
} catch (InvalidShapeException e) {
|
||||
assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior"));
|
||||
}
|
||||
Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build());
|
||||
assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior"));
|
||||
}
|
||||
|
||||
public void testBoundaryShapeWithTangentialHole() {
|
||||
|
@ -602,12 +594,8 @@ public class ShapeBuilderTests extends ESTestCase {
|
|||
.coordinate(176, -10)
|
||||
.coordinate(-177, 10)
|
||||
));
|
||||
try {
|
||||
builder.close().build();
|
||||
fail("Expected InvalidShapeException");
|
||||
} catch (InvalidShapeException e) {
|
||||
assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior"));
|
||||
}
|
||||
Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build());
|
||||
assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior"));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -659,11 +647,7 @@ public class ShapeBuilderTests extends ESTestCase {
|
|||
.coordinate(-176, 4)
|
||||
.coordinate(180, 0)
|
||||
);
|
||||
try {
|
||||
builder.close().build();
|
||||
fail("Expected InvalidShapeException");
|
||||
} catch (InvalidShapeException e) {
|
||||
assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: ("));
|
||||
}
|
||||
Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build());
|
||||
assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: ("));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -97,14 +97,7 @@ public class BytesStreamsTests extends ESTestCase {
|
|||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
|
||||
// bulk-write with wrong args
|
||||
try {
|
||||
out.writeBytes(new byte[]{}, 0, 1);
|
||||
fail("expected IllegalArgumentException: length > (size-offset)");
|
||||
}
|
||||
catch (IllegalArgumentException iax1) {
|
||||
// expected
|
||||
}
|
||||
|
||||
expectThrows(IllegalArgumentException.class, () -> out.writeBytes(new byte[]{}, 0, 1));
|
||||
out.close();
|
||||
}
|
||||
|
||||
|
@ -333,18 +326,21 @@ public class BytesStreamsTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testNamedWriteable() throws IOException {
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
|
||||
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new)
|
||||
));
|
||||
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
|
||||
out.writeNamedWriteable(namedWriteableIn);
|
||||
byte[] bytes = BytesReference.toBytes(out.bytes());
|
||||
StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry);
|
||||
assertEquals(in.available(), bytes.length);
|
||||
BaseNamedWriteable namedWriteableOut = in.readNamedWriteable(BaseNamedWriteable.class);
|
||||
assertEquals(namedWriteableIn, namedWriteableOut);
|
||||
assertEquals(0, in.available());
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
|
||||
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new)));
|
||||
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10),
|
||||
randomAsciiOfLengthBetween(1, 10));
|
||||
out.writeNamedWriteable(namedWriteableIn);
|
||||
byte[] bytes = BytesReference.toBytes(out.bytes());
|
||||
|
||||
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) {
|
||||
assertEquals(in.available(), bytes.length);
|
||||
BaseNamedWriteable namedWriteableOut = in.readNamedWriteable(BaseNamedWriteable.class);
|
||||
assertEquals(namedWriteableIn, namedWriteableOut);
|
||||
assertEquals(0, in.available());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testNamedWriteableList() throws IOException {
|
||||
|
@ -367,59 +363,61 @@ public class BytesStreamsTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testNamedWriteableNotSupportedWithoutWrapping() throws IOException {
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
TestNamedWriteable testNamedWriteable = new TestNamedWriteable("test1", "test2");
|
||||
out.writeNamedWriteable(testNamedWriteable);
|
||||
StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes()));
|
||||
try {
|
||||
in.readNamedWriteable(BaseNamedWriteable.class);
|
||||
fail("Expected UnsupportedOperationException");
|
||||
} catch (UnsupportedOperationException e) {
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
TestNamedWriteable testNamedWriteable = new TestNamedWriteable("test1", "test2");
|
||||
out.writeNamedWriteable(testNamedWriteable);
|
||||
StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes()));
|
||||
Exception e = expectThrows(UnsupportedOperationException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
|
||||
assertThat(e.getMessage(), is("can't read named writeable from StreamInput"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testNamedWriteableReaderReturnsNull() throws IOException {
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
|
||||
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null)
|
||||
));
|
||||
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
|
||||
out.writeNamedWriteable(namedWriteableIn);
|
||||
byte[] bytes = BytesReference.toBytes(out.bytes());
|
||||
StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry);
|
||||
assertEquals(in.available(), bytes.length);
|
||||
IOException e = expectThrows(IOException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
|
||||
assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream."));
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
|
||||
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null)));
|
||||
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10),
|
||||
randomAsciiOfLengthBetween(1, 10));
|
||||
out.writeNamedWriteable(namedWriteableIn);
|
||||
byte[] bytes = BytesReference.toBytes(out.bytes());
|
||||
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) {
|
||||
assertEquals(in.available(), bytes.length);
|
||||
IOException e = expectThrows(IOException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
|
||||
assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream."));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testOptionalWriteableReaderReturnsNull() throws IOException {
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
out.writeOptionalWriteable(new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)));
|
||||
StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes()));
|
||||
IOException e = expectThrows(IOException.class, () -> in.readOptionalWriteable((StreamInput ignored) -> null));
|
||||
assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream."));
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
out.writeOptionalWriteable(new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)));
|
||||
StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes()));
|
||||
IOException e = expectThrows(IOException.class, () -> in.readOptionalWriteable((StreamInput ignored) -> null));
|
||||
assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream."));
|
||||
}
|
||||
}
|
||||
|
||||
public void testWriteableReaderReturnsWrongName() throws IOException {
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList(
|
||||
new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) ->
|
||||
new TestNamedWriteable(in) {
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return "intentionally-broken";
|
||||
}
|
||||
})
|
||||
));
|
||||
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
|
||||
out.writeNamedWriteable(namedWriteableIn);
|
||||
byte[] bytes = BytesReference.toBytes(out.bytes());
|
||||
StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry);
|
||||
assertEquals(in.available(), bytes.length);
|
||||
AssertionError e = expectThrows(AssertionError.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
|
||||
assertThat(e.getMessage(),
|
||||
endsWith(" claims to have a different name [intentionally-broken] than it was read from [test-named-writeable]."));
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(
|
||||
Collections.singletonList(new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME,
|
||||
(StreamInput in) -> new TestNamedWriteable(in) {
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return "intentionally-broken";
|
||||
}
|
||||
})));
|
||||
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10),
|
||||
randomAsciiOfLengthBetween(1, 10));
|
||||
out.writeNamedWriteable(namedWriteableIn);
|
||||
byte[] bytes = BytesReference.toBytes(out.bytes());
|
||||
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) {
|
||||
assertEquals(in.available(), bytes.length);
|
||||
AssertionError e = expectThrows(AssertionError.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
|
||||
assertThat(e.getMessage(),
|
||||
endsWith(" claims to have a different name [intentionally-broken] than it was read from [test-named-writeable]."));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testWriteStreamableList() throws IOException {
|
||||
|
@ -551,32 +549,13 @@ public class BytesStreamsTests extends ESTestCase {
|
|||
assertEquals(-1, out.position());
|
||||
|
||||
// writing a single byte must fail
|
||||
try {
|
||||
out.writeByte((byte)0);
|
||||
fail("expected IllegalStateException: stream closed");
|
||||
}
|
||||
catch (IllegalStateException iex1) {
|
||||
// expected
|
||||
}
|
||||
expectThrows(IllegalArgumentException.class, () -> out.writeByte((byte)0));
|
||||
|
||||
// writing in bulk must fail
|
||||
try {
|
||||
out.writeBytes(new byte[0], 0, 0);
|
||||
fail("expected IllegalStateException: stream closed");
|
||||
}
|
||||
catch (IllegalStateException iex1) {
|
||||
// expected
|
||||
}
|
||||
expectThrows(IllegalArgumentException.class, () -> out.writeBytes(new byte[0], 0, 0));
|
||||
|
||||
// toByteArray() must fail
|
||||
try {
|
||||
BytesReference.toBytes(out.bytes());
|
||||
fail("expected IllegalStateException: stream closed");
|
||||
}
|
||||
catch (IllegalStateException iex1) {
|
||||
// expected
|
||||
}
|
||||
|
||||
expectThrows(IllegalArgumentException.class, () -> BytesReference.toBytes(out.bytes()));
|
||||
}
|
||||
|
||||
// create & fill byte[] with randomized data
|
||||
|
@ -587,16 +566,15 @@ public class BytesStreamsTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testReadWriteGeoPoint() throws IOException {
|
||||
{
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {;
|
||||
GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble());
|
||||
out.writeGenericValue(geoPoint);
|
||||
StreamInput wrap = out.bytes().streamInput();
|
||||
GeoPoint point = (GeoPoint) wrap.readGenericValue();
|
||||
assertEquals(point, geoPoint);
|
||||
}
|
||||
{
|
||||
BytesStreamOutput out = new BytesStreamOutput();
|
||||
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble());
|
||||
out.writeGeoPoint(geoPoint);
|
||||
StreamInput wrap = out.bytes().streamInput();
|
||||
|
@ -640,12 +618,12 @@ public class BytesStreamsTests extends ESTestCase {
|
|||
|
||||
assertNotEquals(mapKeys, reverseMapKeys);
|
||||
|
||||
BytesStreamOutput output = new BytesStreamOutput();
|
||||
BytesStreamOutput reverseMapOutput = new BytesStreamOutput();
|
||||
output.writeMapWithConsistentOrder(map);
|
||||
reverseMapOutput.writeMapWithConsistentOrder(reverseMap);
|
||||
try (BytesStreamOutput output = new BytesStreamOutput(); BytesStreamOutput reverseMapOutput = new BytesStreamOutput()) {
|
||||
output.writeMapWithConsistentOrder(map);
|
||||
reverseMapOutput.writeMapWithConsistentOrder(reverseMap);
|
||||
|
||||
assertEquals(output.bytes(), reverseMapOutput.bytes());
|
||||
assertEquals(output.bytes(), reverseMapOutput.bytes());
|
||||
}
|
||||
}
|
||||
|
||||
public void testReadMapByUsingWriteMapWithConsistentOrder() throws IOException {
|
||||
|
@ -653,18 +631,20 @@ public class BytesStreamsTests extends ESTestCase {
|
|||
randomMap(new HashMap<>(), randomIntBetween(2, 20),
|
||||
() -> randomAsciiOfLength(5),
|
||||
() -> randomAsciiOfLength(5));
|
||||
BytesStreamOutput streamOut = new BytesStreamOutput();
|
||||
streamOut.writeMapWithConsistentOrder(streamOutMap);
|
||||
StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes()));
|
||||
Map<String, Object> streamInMap = in.readMap();
|
||||
assertEquals(streamOutMap, streamInMap);
|
||||
try (BytesStreamOutput streamOut = new BytesStreamOutput()) {
|
||||
streamOut.writeMapWithConsistentOrder(streamOutMap);
|
||||
StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes()));
|
||||
Map<String, Object> streamInMap = in.readMap();
|
||||
assertEquals(streamOutMap, streamInMap);
|
||||
}
|
||||
}
|
||||
|
||||
public void testWriteMapWithConsistentOrderWithLinkedHashMapShouldThrowAssertError() throws IOException {
|
||||
BytesStreamOutput output = new BytesStreamOutput();
|
||||
Map<String, Object> map = new LinkedHashMap<>();
|
||||
Throwable e = expectThrows(AssertionError.class, () -> output.writeMapWithConsistentOrder(map));
|
||||
assertEquals(AssertionError.class, e.getClass());
|
||||
try (BytesStreamOutput output = new BytesStreamOutput()) {
|
||||
Map<String, Object> map = new LinkedHashMap<>();
|
||||
Throwable e = expectThrows(AssertionError.class, () -> output.writeMapWithConsistentOrder(map));
|
||||
assertEquals(AssertionError.class, e.getClass());
|
||||
}
|
||||
}
|
||||
|
||||
private static <K, V> Map<K, V> randomMap(Map<K, V> map, int size, Supplier<K> keyGenerator, Supplier<V> valueGenerator) {
|
||||
|
|
@@ -123,48 +123,30 @@ public class ByteSizeValueTests extends ESTestCase {
    }

    public void testFailOnMissingUnits() {
-       try {
-           ByteSizeValue.parseBytesSizeValue("23", "test");
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("failed to parse setting [test]"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("23", "test"));
+       assertThat(e.getMessage(), containsString("failed to parse setting [test]"));
    }

    public void testFailOnUnknownUnits() {
-       try {
-           ByteSizeValue.parseBytesSizeValue("23jw", "test");
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("failed to parse setting [test]"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("23jw", "test"));
+       assertThat(e.getMessage(), containsString("failed to parse setting [test]"));
    }

    public void testFailOnEmptyParsing() {
-       try {
-           assertThat(ByteSizeValue.parseBytesSizeValue("", "emptyParsing").toString(), is("23kb"));
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("failed to parse setting [emptyParsing]"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class,
+           () -> assertThat(ByteSizeValue.parseBytesSizeValue("", "emptyParsing").toString(), is("23kb")));
+       assertThat(e.getMessage(), containsString("failed to parse setting [emptyParsing]"));
    }

    public void testFailOnEmptyNumberParsing() {
-       try {
-           assertThat(ByteSizeValue.parseBytesSizeValue("g", "emptyNumberParsing").toString(), is("23b"));
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("failed to parse [g]"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class,
+           () -> assertThat(ByteSizeValue.parseBytesSizeValue("g", "emptyNumberParsing").toString(), is("23b")));
+       assertThat(e.getMessage(), containsString("failed to parse [g]"));
    }

    public void testNoDotsAllowed() {
-       try {
-           ByteSizeValue.parseBytesSizeValue("42b.", null, "test");
-           fail("Expected ElasticsearchParseException");
-       } catch (ElasticsearchParseException e) {
-           assertThat(e.getMessage(), containsString("failed to parse setting [test]"));
-       }
+       Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("42b.", null, "test"));
+       assertThat(e.getMessage(), containsString("failed to parse setting [test]"));
    }

    public void testCompareEquality() {
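For contrast with the failure cases above, a value only parses when it carries an explicit unit. A minimal hedged sketch of the happy path (the value is illustrative, not taken from the change):

    // "23" alone fails as shown above; "23kb" parses because the unit is explicit.
    ByteSizeValue value = ByteSizeValue.parseBytesSizeValue("23kb", "test");
    assertThat(value.toString(), is("23kb"));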
@@ -128,6 +128,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

    private ClusterDiscoveryConfiguration discoveryConfig;

+   @Override
+   protected boolean addMockZenPings() {
+       return false;
+   }

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {

@ -24,9 +24,7 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
|
@ -38,7 +36,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
|
|||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.DiscoveryStats;
|
||||
import org.elasticsearch.discovery.zen.fd.FaultDetection;
|
||||
import org.elasticsearch.discovery.zen.membership.MembershipAction;
|
||||
|
@ -46,7 +43,6 @@ import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
|
|||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.TestCustomMetaData;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.BytesTransportRequest;
|
||||
|
@ -55,7 +51,6 @@ import org.elasticsearch.transport.TransportException;
|
|||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.hamcrest.Matchers;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.UnknownHostException;
|
||||
|
@ -78,32 +73,10 @@ import static org.hamcrest.Matchers.notNullValue;
|
|||
@TestLogging("_root:DEBUG")
|
||||
public class ZenDiscoveryIT extends ESIntegTestCase {
|
||||
|
||||
private Version previousMajorVersion;
|
||||
|
||||
@Override
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void computePrevMajorVersion() {
|
||||
Version previousMajor;
|
||||
// find a GA build whose major version is <N
|
||||
do {
|
||||
previousMajor = VersionUtils.randomVersion(random());
|
||||
} while (previousMajor.onOrAfter(Version.CURRENT.minimumCompatibilityVersion())
|
||||
|| previousMajor.isAlpha()
|
||||
|| previousMajor.isBeta()
|
||||
|| previousMajor.isRC());
|
||||
previousMajorVersion = previousMajor;
|
||||
}
|
||||
|
||||
public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception {
|
||||
Settings defaultSettings = Settings.builder()
|
||||
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s")
|
||||
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1")
|
||||
.put("discovery.type", "zen")
|
||||
.build();
|
||||
|
||||
Settings masterNodeSettings = Settings.builder()
|
||||
|
@ -149,7 +122,6 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
|
|||
Settings defaultSettings = Settings.builder()
|
||||
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s")
|
||||
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1")
|
||||
.put("discovery.type", "zen")
|
||||
.build();
|
||||
|
||||
Settings masterNodeSettings = Settings.builder()
|
||||
|
@ -167,16 +139,13 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
|
|||
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
|
||||
final ArrayList<ClusterState> statesFound = new ArrayList<>();
|
||||
final CountDownLatch nodesStopped = new CountDownLatch(1);
|
||||
clusterService.add(new ClusterStateListener() {
|
||||
@Override
|
||||
public void clusterChanged(ClusterChangedEvent event) {
|
||||
statesFound.add(event.state());
|
||||
try {
|
||||
// block until both nodes have stopped to accumulate node failures
|
||||
nodesStopped.await();
|
||||
} catch (InterruptedException e) {
|
||||
//meh
|
||||
}
|
||||
clusterService.add(event -> {
|
||||
statesFound.add(event.state());
|
||||
try {
|
||||
// block until both nodes have stopped to accumulate node failures
|
||||
nodesStopped.await();
|
||||
} catch (InterruptedException e) {
|
||||
//meh
|
||||
}
|
||||
});
|
||||
|
||||
|
@ -189,10 +158,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception {
|
||||
Settings settings = Settings.builder()
|
||||
.put("discovery.type", "zen")
|
||||
.build();
|
||||
List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
|
||||
List<String> nodeNames = internalCluster().startNodesAsync(2).get();
|
||||
client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
|
||||
|
||||
List<String> nonMasterNodes = new ArrayList<>(nodeNames);
|
||||
|
@ -303,10 +269,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
|
|||
" }\n" +
|
||||
"}";
|
||||
|
||||
Settings nodeSettings = Settings.builder()
|
||||
.put("discovery.type", "zen") // <-- To override the local setting if set externally
|
||||
.build();
|
||||
internalCluster().startNode(nodeSettings);
|
||||
internalCluster().startNode();
|
||||
|
||||
logger.info("--> request node discovery stats");
|
||||
NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setDiscovery(true).get();
|
||||
|
|
|
@ -87,7 +87,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
|
|||
|
||||
currentNodes = DiscoveryNodes.builder();
|
||||
currentNodes.masterNodeId("b").add(new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
|
||||
;
|
||||
|
||||
// version isn't taken into account, so randomize it to ensure this.
|
||||
if (randomBoolean()) {
|
||||
currentState.version(2);
|
||||
|
|
|
@ -1,160 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index;
|
||||
|
||||
import org.elasticsearch.action.index.IndexAction;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.discovery.zen.fd.FaultDetection;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.transport.MockTransportService;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
|
||||
import static java.util.Collections.singleton;
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
/**
|
||||
* Test failure when index replication actions fail mid-flight
|
||||
*/
|
||||
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
|
||||
public class TransportIndexFailuresIT extends ESIntegTestCase {
|
||||
|
||||
private static final Settings nodeSettings = Settings.builder()
|
||||
.put("discovery.type", "zen") // <-- To override the local setting if set externally
|
||||
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
|
||||
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // <-- for hitting simulated network failures quickly
|
||||
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
|
||||
.put("discovery.zen.minimum_master_nodes", 1)
|
||||
.build();
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Arrays.asList(MockTransportService.TestPlugin.class);
|
||||
    }

    @Override
    protected int numberOfShards() {
        return 1;
    }

    @Override
    protected int numberOfReplicas() {
        return 1;
    }

    public void testNetworkPartitionDuringReplicaIndexOp() throws Exception {
        final String INDEX = "testidx";

        List<String> nodes = internalCluster().startNodesAsync(2, nodeSettings).get();

        // Create index test with 1 shard, 1 replica and ensure it is green
        createIndex(INDEX);
        ensureGreen(INDEX);

        // Disable allocation so the replica cannot be reallocated when it fails
        Settings s = Settings.builder().put("cluster.routing.allocation.enable", "none").build();
        client().admin().cluster().prepareUpdateSettings().setTransientSettings(s).get();

        // Determine which node holds the primary shard
        ClusterState state = getNodeClusterState(nodes.get(0));
        IndexShardRoutingTable shard = state.getRoutingTable().index(INDEX).shard(0);
        String primaryNode;
        String replicaNode;
        if (shard.getShards().get(0).primary()) {
            primaryNode = nodes.get(0);
            replicaNode = nodes.get(1);
        } else {
            primaryNode = nodes.get(1);
            replicaNode = nodes.get(0);
        }
        logger.info("--> primary shard is on {}", primaryNode);

        // Index a document to make sure everything works well
        IndexResponse resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "bar").get();
        assertThat("document exists on primary node",
            internalCluster().client(primaryNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
            equalTo(true));
        assertThat("document exists on replica node",
            internalCluster().client(replicaNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
            equalTo(true));

        // Disrupt the network so indexing requests fail to replicate
        logger.info("--> preventing index/replica operations");
        TransportService mockTransportService = internalCluster().getInstance(TransportService.class, primaryNode);
        ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
            internalCluster().getInstance(TransportService.class, replicaNode),
            singleton(IndexAction.NAME + "[r]")
        );
        mockTransportService = internalCluster().getInstance(TransportService.class, replicaNode);
        ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
            internalCluster().getInstance(TransportService.class, primaryNode),
            singleton(IndexAction.NAME + "[r]")
        );

        logger.info("--> indexing into primary");
        // the replica shard should now be marked as failed because the replication operation will fail
        resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "baz").get();
        // wait until the cluster reaches an exact yellow state, meaning replica has failed
        assertBusy(new Runnable() {
            @Override
            public void run() {
                assertThat(client().admin().cluster().prepareHealth().get().getStatus(), equalTo(ClusterHealthStatus.YELLOW));
            }
        });
        assertThat("document should still be indexed and available",
            client().prepareGet(INDEX, "doc", resp.getId()).get().isExists(), equalTo(true));

        state = getNodeClusterState(randomFrom(nodes.toArray(Strings.EMPTY_ARRAY)));
        RoutingNodes rn = state.getRoutingNodes();
        logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
            rn.shards(input -> true).size(),
            rn.shardsWithState(UNASSIGNED).size(),
            rn.shardsWithState(INITIALIZING).size(),
            rn.shardsWithState(RELOCATING).size(),
            rn.shardsWithState(STARTED).size());
        logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
            rn.shardsWithState(UNASSIGNED),
            rn.shardsWithState(INITIALIZING),
            rn.shardsWithState(RELOCATING),
            rn.shardsWithState(STARTED));

        assertThat("only a single shard is now active (replica should be failed and not reallocated)",
            rn.shardsWithState(STARTED).size(), equalTo(1));
    }

    private ClusterState getNodeClusterState(String node) {
        return internalCluster().client(node).admin().cluster().prepareState().setLocal(true).get().getState();
    }
}
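The busy wait above uses an anonymous Runnable; the same check reads more compactly as a lambda. A minimal sketch, assuming the ESIntegTestCase/ESTestCase helpers already used in this test:

    assertBusy(() ->
        assertThat(client().admin().cluster().prepareHealth().get().getStatus(), equalTo(ClusterHealthStatus.YELLOW)));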
|
|
@@ -41,7 +41,6 @@ import org.hamcrest.CoreMatchers;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.lang.NumberFormatException;

import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
@ -256,85 +255,60 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.endObject()
|
||||
.bytes());
|
||||
|
||||
try {
|
||||
expectThrows(MapperParsingException.class, () ->
|
||||
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("point").field("lat", -91).field("lon", 1.3).endObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
fail();
|
||||
} catch (MapperParsingException e) {
|
||||
.bytes()));
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
expectThrows(MapperParsingException.class, () ->
|
||||
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("point").field("lat", 91).field("lon", 1.3).endObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
fail();
|
||||
} catch (MapperParsingException e) {
|
||||
.bytes()));
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
expectThrows(MapperParsingException.class, () ->
|
||||
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("point").field("lat", 1.2).field("lon", -181).endObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
fail();
|
||||
} catch (MapperParsingException e) {
|
||||
.bytes()));
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
expectThrows(MapperParsingException.class, () ->
|
||||
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("point").field("lat", 1.2).field("lon", 181).endObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
fail();
|
||||
} catch (MapperParsingException e) {
|
||||
.bytes()));
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
MapperParsingException e = expectThrows(MapperParsingException.class, () ->
|
||||
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("point").field("lat", "-").field("lon", 1.3).endObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
fail();
|
||||
} catch (MapperParsingException e) {
|
||||
assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
|
||||
assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
|
||||
}
|
||||
.bytes()));
|
||||
assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
|
||||
assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
|
||||
|
||||
try {
|
||||
e = expectThrows(MapperParsingException.class, () ->
|
||||
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("point").field("lat", 1.2).field("lon", "-").endObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
fail();
|
||||
} catch (MapperParsingException e) {
|
||||
assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
|
||||
assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
|
||||
}
|
||||
.bytes()));
|
||||
assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
|
||||
assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
|
||||
|
||||
try {
|
||||
e = expectThrows(MapperParsingException.class, () ->
|
||||
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.startObject("point").field("lat", "-").field("lon", "-").endObject()
|
||||
.endObject()
|
||||
.bytes());
|
||||
fail();
|
||||
} catch (MapperParsingException e) {
|
||||
assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
|
||||
assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
|
||||
}
|
||||
.bytes()));
|
||||
assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
|
||||
assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
|
||||
}
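    // A sketch (not part of the commit) of the idiom the hunks above converge on: ESTestCase's
    // expectThrows runs the lambda, asserts that the expected exception type is thrown, and returns
    // the exception so its message or root cause can be inspected, replacing try/fail()/catch blocks.
    // parseInvalidPoint() is a hypothetical helper standing in for the defaultMapper.parse(...) calls above.
    public void testInvalidPointSketch() {
        MapperParsingException e = expectThrows(MapperParsingException.class, () -> parseInvalidPoint());
        assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
    }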
|
||||
|
||||
public void testNoValidateLegacyLatLonValues() throws Exception {
|
||||
|
@ -743,92 +717,84 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
|
||||
if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
|
||||
try {
|
||||
String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject()
|
||||
.endObject().endObject().string();
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping));
|
||||
} catch (MapperParsingException e) {
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [geohash : true]");
|
||||
}
|
||||
String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject().endObject().endObject()
|
||||
.string();
|
||||
Exception e = expectThrows(MapperParsingException.class, () ->
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping)));
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [geohash : true]");
|
||||
}
|
||||
|
||||
try {
|
||||
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point");
|
||||
{
|
||||
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("point").field("type", "geo_point");
|
||||
if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
|
||||
xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
|
||||
}
|
||||
String validateMapping = xContentBuilder.field("validate", true).endObject().endObject().endObject().endObject().string();
|
||||
parser.parse("type", new CompressedXContent(validateMapping));
|
||||
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
|
||||
} catch (MapperParsingException e) {
|
||||
Exception e = expectThrows(MapperParsingException.class, () ->
|
||||
parser.parse("type", new CompressedXContent(validateMapping)));
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate : true]");
|
||||
}
|
||||
|
||||
try {
|
||||
{
|
||||
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point");
|
||||
if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
|
||||
xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
|
||||
}
|
||||
String validateMapping = xContentBuilder.field("validate_lat", true).endObject().endObject().endObject().endObject().string();
|
||||
parser.parse("type", new CompressedXContent(validateMapping));
|
||||
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
|
||||
} catch (MapperParsingException e) {
|
||||
Exception e = expectThrows(MapperParsingException.class, () ->
|
||||
parser.parse("type", new CompressedXContent(validateMapping)));
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lat : true]");
|
||||
}
|
||||
|
||||
try {
|
||||
{
|
||||
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point");
|
||||
if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
|
||||
xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
|
||||
}
|
||||
String validateMapping = xContentBuilder.field("validate_lon", true).endObject().endObject().endObject().endObject().string();
|
||||
parser.parse("type", new CompressedXContent(validateMapping));
|
||||
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
|
||||
} catch (MapperParsingException e) {
|
||||
Exception e = expectThrows(MapperParsingException.class, () ->
|
||||
parser.parse("type", new CompressedXContent(validateMapping)));
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lon : true]");
|
||||
}
|
||||
|
||||
// test deprecated normalize
|
||||
try {
|
||||
{
|
||||
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point");
|
||||
if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
|
||||
xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
|
||||
}
|
||||
String normalizeMapping = xContentBuilder.field("normalize", true).endObject().endObject().endObject().endObject().string();
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping));
|
||||
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
|
||||
} catch (MapperParsingException e) {
|
||||
Exception e = expectThrows(MapperParsingException.class, () ->
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping)));
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize : true]");
|
||||
}
|
||||
|
||||
try {
|
||||
{
|
||||
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point");
|
||||
if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
|
||||
xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
|
||||
}
|
||||
String normalizeMapping = xContentBuilder.field("normalize_lat", true).endObject().endObject().endObject().endObject().string();
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping));
|
||||
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
|
||||
} catch (MapperParsingException e) {
|
||||
Exception e = expectThrows(MapperParsingException.class, () ->
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping)));
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lat : true]");
|
||||
}
|
||||
|
||||
try {
|
||||
{
|
||||
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point");
|
||||
if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
|
||||
xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
|
||||
}
|
||||
String normalizeMapping = xContentBuilder.field("normalize_lon", true).endObject().endObject().endObject().endObject().string();
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping));
|
||||
fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
|
||||
} catch (MapperParsingException e) {
|
||||
Exception e = expectThrows(MapperParsingException.class, () ->
|
||||
parser.parse("type", new CompressedXContent(normalizeMapping)));
|
||||
assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lon : true]");
|
||||
}
|
||||
}
|
||||
|
@ -844,20 +810,17 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
|
|||
String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false)
|
||||
.field("geohash", false).endObject().endObject().endObject().endObject().string();
|
||||
try {
|
||||
mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), containsString("mapper [point] has different [lat_lon]"));
|
||||
assertThat(e.getMessage(), containsString("mapper [point] has different [geohash]"));
|
||||
assertThat(e.getMessage(), containsString("mapper [point] has different [geohash_precision]"));
|
||||
}
|
||||
Exception e = expectThrows(IllegalArgumentException.class, () ->
|
||||
mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false));
|
||||
assertThat(e.getMessage(), containsString("mapper [point] has different [lat_lon]"));
|
||||
assertThat(e.getMessage(), containsString("mapper [point] has different [geohash]"));
|
||||
assertThat(e.getMessage(), containsString("mapper [point] has different [geohash_precision]"));
|
||||
|
||||
// correct mapping and ensure no failures
|
||||
stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
String stage2MappingCorrect = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
|
||||
.field("geohash", true).endObject().endObject().endObject().endObject().string();
|
||||
mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
|
||||
mapperService.merge("type", new CompressedXContent(stage2MappingCorrect), MapperService.MergeReason.MAPPING_UPDATE, false);
|
||||
}
|
||||
|
||||
public void testLegacyGeoHashSearch() throws Exception {
|
||||
|
|
|
@ -22,13 +22,8 @@ package org.elasticsearch.index.mapper;
|
|||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.ObjectMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService.MergeReason;
|
||||
import org.elasticsearch.index.mapper.ObjectMapper.Dynamic;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.TypeFieldMapper;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -366,22 +361,16 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
|
|||
createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
|
||||
|
||||
// explicitly setting limit to 0 prevents nested fields
|
||||
try {
|
||||
Exception e = expectThrows(IllegalArgumentException.class, () ->
|
||||
createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build())
|
||||
.mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
|
||||
fail("Expected IllegalArgumentException");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded"));
|
||||
}
|
||||
.mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false));
|
||||
assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded"));
|
||||
|
||||
// setting limit to 1 with 2 nested fields fails
|
||||
try {
|
||||
e = expectThrows(IllegalArgumentException.class, () ->
|
||||
createIndex("test3", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build())
|
||||
.mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
|
||||
fail("Expected IllegalArgumentException");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded"));
|
||||
}
|
||||
.mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false));
|
||||
assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded"));
|
||||
|
||||
MapperService mapperService = createIndex("test4", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2)
|
||||
.build()).mapperService();
|
||||
|
@ -391,12 +380,9 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
|
|||
// adding new fields from different type is not ok
|
||||
String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type3").startObject("properties").startObject("nested3")
|
||||
.field("type", "nested").startObject("properties").endObject().endObject().endObject().endObject().endObject().string();
|
||||
try {
|
||||
mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
|
||||
fail("Expected IllegalArgumentException");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded"));
|
||||
}
|
||||
e = expectThrows(IllegalArgumentException.class, () ->
|
||||
mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false));
|
||||
assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded"));
|
||||
|
||||
// do not check nested fields limit if mapping is not updated
|
||||
createIndex("test5", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build())
|
||||
|
|
|
@ -30,20 +30,11 @@ import org.elasticsearch.cluster.metadata.MetaData;
|
|||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.joda.Joda;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.DocumentMapperParser;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.SourceToParse;
|
||||
import org.elasticsearch.index.mapper.TimestampFieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService.MergeReason;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
@ -57,7 +48,6 @@ import java.util.Collection;
|
|||
import java.util.LinkedHashMap;
|
||||
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
|
||||
import static org.elasticsearch.test.VersionUtils.randomVersion;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
@ -212,12 +202,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.field("default", (String) null)
|
||||
.endObject()
|
||||
.endObject().endObject();
|
||||
try {
|
||||
createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string()));
|
||||
fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null");
|
||||
} catch (TimestampParsingException e) {
|
||||
assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null"));
|
||||
}
|
||||
TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService()
|
||||
.documentMapperParser().parse("type", new CompressedXContent(mapping.string())));
|
||||
assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null"));
|
||||
}
|
||||
|
||||
// Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
|
||||
|
@ -229,12 +216,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.endObject()
|
||||
.endObject().endObject();
|
||||
|
||||
try {
|
||||
createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string()));
|
||||
fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null");
|
||||
} catch (TimestampParsingException e) {
|
||||
assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null"));
|
||||
}
|
||||
TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService()
|
||||
.documentMapperParser().parse("type", new CompressedXContent(mapping.string())));
|
||||
assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null"));
|
||||
}
|
||||
|
||||
// Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
|
||||
|
@ -247,12 +231,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.endObject()
|
||||
.endObject().endObject();
|
||||
|
||||
try {
|
||||
createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string()));
|
||||
fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set with ignore_missing set to false");
|
||||
} catch (TimestampParsingException e) {
|
||||
assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set with ignore_missing set to false"));
|
||||
}
|
||||
TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService()
|
||||
.documentMapperParser().parse("type", new CompressedXContent(mapping.string())));
|
||||
assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set with ignore_missing set to false"));
|
||||
}
|
||||
|
||||
// Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
|
||||
|
|
|
@ -30,8 +30,8 @@ import org.elasticsearch.test.geo.RandomGeoGenerator;
|
|||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class GeoPointParsingTests extends ESTestCase {
|
||||
static double TOLERANCE = 1E-5;
|
||||
|
@ -112,13 +112,8 @@ public class GeoPointParsingTests extends ESTestCase {
|
|||
|
||||
XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
|
||||
parser.nextToken();
|
||||
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
|
||||
}
|
||||
|
||||
public void testInvalidPointLatHashMix() throws IOException {
|
||||
|
@ -130,12 +125,8 @@ public class GeoPointParsingTests extends ESTestCase {
|
|||
XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
|
||||
parser.nextToken();
|
||||
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
|
||||
}
|
||||
|
||||
public void testInvalidPointLonHashMix() throws IOException {
|
||||
|
@ -147,12 +138,8 @@ public class GeoPointParsingTests extends ESTestCase {
|
|||
XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
|
||||
parser.nextToken();
|
||||
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("field must be either lat/lon or geohash"));
|
||||
}
|
||||
|
||||
public void testInvalidField() throws IOException {
|
||||
|
@ -164,12 +151,8 @@ public class GeoPointParsingTests extends ESTestCase {
|
|||
XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
|
||||
parser.nextToken();
|
||||
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
|
||||
}
|
||||
|
||||
private static XContentParser objectLatLon(double lat, double lon) throws IOException {
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
|
||||
package org.elasticsearch.index.search.geo;
|
||||
|
||||
import org.locationtech.spatial4j.context.SpatialContext;
|
||||
import org.locationtech.spatial4j.distance.DistanceUtils;
|
||||
import org.apache.lucene.spatial.prefix.tree.Cell;
|
||||
import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
|
||||
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
|
||||
|
@ -33,6 +31,8 @@ import org.elasticsearch.common.xcontent.XContentHelper;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.locationtech.spatial4j.context.SpatialContext;
|
||||
import org.locationtech.spatial4j.distance.DistanceUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -439,12 +439,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
BytesReference jsonBytes = jsonBuilder().startObject().field("geohash", 1.0).endObject().bytes();
|
||||
XContentParser parser = XContentHelper.createParser(jsonBytes);
|
||||
parser.nextToken();
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), containsString("geohash must be a string"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), containsString("geohash must be a string"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointLatNoLon() throws IOException {
|
||||
|
@ -452,12 +448,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).endObject().bytes();
|
||||
XContentParser parser = XContentHelper.createParser(jsonBytes);
|
||||
parser.nextToken();
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("field [lon] missing"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("field [lon] missing"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointLonNoLat() throws IOException {
|
||||
|
@ -465,12 +457,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
BytesReference jsonBytes = jsonBuilder().startObject().field("lon", lon).endObject().bytes();
|
||||
XContentParser parser = XContentHelper.createParser(jsonBytes);
|
||||
parser.nextToken();
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("field [lat] missing"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("field [lat] missing"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointLonWrongType() throws IOException {
|
||||
|
@ -478,12 +466,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", false).endObject().bytes();
|
||||
XContentParser parser = XContentHelper.createParser(jsonBytes);
|
||||
parser.nextToken();
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("longitude must be a number"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("longitude must be a number"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointLatWrongType() throws IOException {
|
||||
|
@ -491,12 +475,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
BytesReference jsonBytes = jsonBuilder().startObject().field("lat", false).field("lon", lon).endObject().bytes();
|
||||
XContentParser parser = XContentHelper.createParser(jsonBytes);
|
||||
parser.nextToken();
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("latitude must be a number"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("latitude must be a number"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointExtraField() throws IOException {
|
||||
|
@ -505,12 +485,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("foo", true).endObject().bytes();
|
||||
XContentParser parser = XContentHelper.createParser(jsonBytes);
|
||||
parser.nextToken();
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointLonLatGeoHash() throws IOException {
|
||||
|
@ -521,12 +497,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
.bytes();
|
||||
XContentParser parser = XContentHelper.createParser(jsonBytes);
|
||||
parser.nextToken();
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointArrayTooManyValues() throws IOException {
|
||||
|
@ -539,12 +511,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
while (parser.currentToken() != Token.START_ARRAY) {
|
||||
parser.nextToken();
|
||||
}
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("only two values allowed"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("only two values allowed"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointArrayWrongType() throws IOException {
|
||||
|
@ -555,12 +523,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
while (parser.currentToken() != Token.START_ARRAY) {
|
||||
parser.nextToken();
|
||||
}
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("numeric value expected"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("numeric value expected"));
|
||||
}
|
||||
|
||||
public void testParseGeoPointInvalidType() throws IOException {
|
||||
|
@ -569,12 +533,8 @@ public class GeoUtilsTests extends ESTestCase {
|
|||
while (parser.currentToken() != Token.VALUE_NUMBER) {
|
||||
parser.nextToken();
|
||||
}
|
||||
try {
|
||||
GeoUtils.parseGeoPoint(parser);
|
||||
fail("Expected ElasticsearchParseException");
|
||||
} catch (ElasticsearchParseException e) {
|
||||
assertThat(e.getMessage(), is("geo_point expected"));
|
||||
}
|
||||
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
|
||||
assertThat(e.getMessage(), is("geo_point expected"));
|
||||
}
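    // A sketch (not part of the commit), assuming the same helpers as the tests above and that
    // Hamcrest's closeTo matcher is available, of the happy path these failure cases complement:
    // a well-formed lat/lon object parses cleanly.
    public void testParseGeoPointValidObjectSketch() throws IOException {
        BytesReference jsonBytes = jsonBuilder().startObject().field("lat", 40.7).field("lon", -74.0).endObject().bytes();
        XContentParser parser = XContentHelper.createParser(jsonBytes);
        parser.nextToken();
        GeoPoint point = GeoUtils.parseGeoPoint(parser);
        assertThat(point.lat(), closeTo(40.7, 1E-5));
        assertThat(point.lon(), closeTo(-74.0, 1E-5));
    }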
|
||||
|
||||
public void testPrefixTreeCellSizes() {
|
||||
|
|
|
@ -470,8 +470,6 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
throw new RuntimeException(ex);
|
||||
}
|
||||
}
|
||||
|
||||
;
|
||||
};
|
||||
thread[i].start();
|
||||
}
|
||||
|
@ -1172,6 +1170,7 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
throw new RuntimeException("boom");
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
|
||||
return searcher;
|
||||
}
|
||||
|
|
|
@ -64,9 +64,8 @@ public class ShardPathTests extends ESTestCase {
|
|||
assumeTrue("This test tests multi data.path but we only got one", paths.length > 1);
|
||||
int id = randomIntBetween(1, 10);
|
||||
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, indexUUID, AllocationId.newInitializing()), paths);
|
||||
ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
Exception e = expectThrows(IllegalStateException.class, () ->
|
||||
ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)));
|
||||
assertThat(e.getMessage(), containsString("more than one shard state found"));
|
||||
}
|
||||
}
|
||||
|
@ -81,9 +80,8 @@ public class ShardPathTests extends ESTestCase {
|
|||
Path path = randomFrom(paths);
|
||||
int id = randomIntBetween(1, 10);
|
||||
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), path);
|
||||
ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
|
||||
fail("Expected IllegalStateException");
|
||||
} catch (IllegalStateException e) {
|
||||
Exception e = expectThrows(IllegalStateException.class, () ->
|
||||
ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)));
|
||||
assertThat(e.getMessage(), containsString("expected: foobar on shard path"));
|
||||
}
|
||||
}
|
||||
|
@ -91,12 +89,8 @@ public class ShardPathTests extends ESTestCase {
|
|||
public void testIllegalCustomDataPath() {
|
||||
Index index = new Index("foo", "foo");
|
||||
final Path path = createTempDir().resolve(index.getUUID()).resolve("0");
|
||||
try {
|
||||
new ShardPath(true, path, path, new ShardId(index, 0));
|
||||
fail("Expected IllegalArgumentException");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths"));
|
||||
}
|
||||
Exception e = expectThrows(IllegalArgumentException.class, () -> new ShardPath(true, path, path, new ShardId(index, 0)));
|
||||
assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths"));
|
||||
}
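    // A sketch (not part of the commit) of the accepted configuration, assuming the constructor's
    // contract is that the data path and shard state path only have to differ for custom data paths.
    public void testNonCustomDataPathSketch() {
        Index index = new Index("foo", "foo");
        Path path = createTempDir().resolve(index.getUUID()).resolve("0");
        new ShardPath(false, path, path, new ShardId(index, 0)); // no exception expected for non-custom paths
    }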
|
||||
|
||||
public void testValidCtor() {
|
||||
|
|
|
@ -45,7 +45,6 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsModule;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
|
@ -444,21 +443,15 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
|
||||
public void testAllMissingStrict() throws Exception {
|
||||
createIndex("test1");
|
||||
try {
|
||||
expectThrows(IndexNotFoundException.class, () ->
|
||||
client().prepareSearch("test2")
|
||||
.setQuery(matchAllQuery())
|
||||
.execute().actionGet();
|
||||
fail("Exception should have been thrown.");
|
||||
} catch (IndexNotFoundException e) {
|
||||
}
|
||||
.execute().actionGet());
|
||||
|
||||
try {
|
||||
expectThrows(IndexNotFoundException.class, () ->
|
||||
client().prepareSearch("test2","test3")
|
||||
.setQuery(matchAllQuery())
|
||||
.execute().actionGet();
|
||||
fail("Exception should have been thrown.");
|
||||
} catch (IndexNotFoundException e) {
|
||||
}
|
||||
.execute().actionGet());
|
||||
|
||||
//you should still be able to run empty searches without things blowing up
|
||||
client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
|
||||
|
|
|
@ -70,22 +70,16 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
|
|||
|
||||
public void testSimpleCloseMissingIndex() {
|
||||
Client client = client();
|
||||
try {
|
||||
client.admin().indices().prepareClose("test1").execute().actionGet();
|
||||
fail("Expected IndexNotFoundException");
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
Exception e = expectThrows(IndexNotFoundException.class, () ->
|
||||
client.admin().indices().prepareClose("test1").execute().actionGet());
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
|
||||
public void testSimpleOpenMissingIndex() {
|
||||
Client client = client();
|
||||
try {
|
||||
client.admin().indices().prepareOpen("test1").execute().actionGet();
|
||||
fail("Expected IndexNotFoundException");
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
Exception e = expectThrows(IndexNotFoundException.class, () ->
|
||||
client.admin().indices().prepareOpen("test1").execute().actionGet());
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
|
||||
public void testCloseOneMissingIndex() {
|
||||
|
@ -93,12 +87,9 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
|
|||
createIndex("test1");
|
||||
ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
|
||||
assertThat(healthResponse.isTimedOut(), equalTo(false));
|
||||
try {
|
||||
client.admin().indices().prepareClose("test1", "test2").execute().actionGet();
|
||||
fail("Expected IndexNotFoundException");
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
Exception e = expectThrows(IndexNotFoundException.class, () ->
|
||||
client.admin().indices().prepareClose("test1", "test2").execute().actionGet());
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
|
||||
public void testCloseOneMissingIndexIgnoreMissing() {
|
||||
|
@ -117,12 +108,9 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
|
|||
createIndex("test1");
|
||||
ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
|
||||
assertThat(healthResponse.isTimedOut(), equalTo(false));
|
||||
try {
|
||||
client.admin().indices().prepareOpen("test1", "test2").execute().actionGet();
|
||||
fail("Expected IndexNotFoundException");
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
Exception e = expectThrows(IndexNotFoundException.class, () ->
|
||||
client.admin().indices().prepareOpen("test1", "test2").execute().actionGet());
|
||||
assertThat(e.getMessage(), is("no such index"));
|
||||
}
|
||||
|
||||
public void testOpenOneMissingIndexIgnoreMissing() {
|
||||
|
@ -204,42 +192,30 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
|
|||
|
||||
public void testCloseNoIndex() {
|
||||
Client client = client();
|
||||
try {
|
||||
client.admin().indices().prepareClose().execute().actionGet();
|
||||
fail("Expected ActionRequestValidationException");
|
||||
} catch (ActionRequestValidationException e) {
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
|
||||
Exception e = expectThrows(ActionRequestValidationException.class, () ->
|
||||
client.admin().indices().prepareClose().execute().actionGet());
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
|
||||
|
||||
public void testCloseNullIndex() {
|
||||
Client client = client();
|
||||
try {
|
||||
client.admin().indices().prepareClose((String[])null).execute().actionGet();
|
||||
fail("Expected ActionRequestValidationException");
|
||||
} catch (ActionRequestValidationException e) {
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
|
||||
Exception e = expectThrows(ActionRequestValidationException.class, () ->
|
||||
client.admin().indices().prepareClose((String[])null).execute().actionGet());
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
|
||||
|
||||
public void testOpenNoIndex() {
|
||||
Client client = client();
|
||||
try {
|
||||
client.admin().indices().prepareOpen().execute().actionGet();
|
||||
fail("Expected ActionRequestValidationException");
|
||||
} catch (ActionRequestValidationException e) {
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
|
||||
Exception e = expectThrows(ActionRequestValidationException.class, () ->
|
||||
client.admin().indices().prepareOpen().execute().actionGet());
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
|
||||
|
||||
public void testOpenNullIndex() {
|
||||
Client client = client();
|
||||
try {
|
||||
client.admin().indices().prepareOpen((String[])null).execute().actionGet();
|
||||
fail("Expected ActionRequestValidationException");
|
||||
} catch (ActionRequestValidationException e) {
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
|
||||
Exception e = expectThrows(ActionRequestValidationException.class, () ->
|
||||
client.admin().indices().prepareOpen((String[])null).execute().actionGet());
|
||||
assertThat(e.getMessage(), containsString("index is missing"));
|
||||
}
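    // A sketch (not part of the commit) of the non-error path that the missing-index tests above
    // complement, assuming the usual assertAcked helper is available in this test class.
    public void testCloseThenOpenExistingIndexSketch() {
        Client client = client();
        createIndex("test1");
        assertAcked(client.admin().indices().prepareClose("test1").execute().actionGet());
        assertAcked(client.admin().indices().prepareOpen("test1").execute().actionGet());
    }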
|
||||
|
||||
public void testOpenAlreadyOpenedIndex() {
|
||||
|
|
|
@ -42,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService;
|
|||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.gateway.GatewayAllocator;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
@ -77,9 +76,8 @@ import static org.hamcrest.Matchers.instanceOf;
|
|||
public class RareClusterStateIT extends ESIntegTestCase {
|
||||
|
||||
@Override
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
|
||||
protected boolean addMockZenPings() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -173,9 +171,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
|
|||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932")
|
||||
public void testDeleteCreateInOneBulk() throws Exception {
|
||||
internalCluster().startNodesAsync(2, Settings.builder()
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen")
|
||||
.build()).get();
|
||||
internalCluster().startNodesAsync(2).get();
|
||||
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
|
||||
prepareCreate("test").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).addMapping("type").get();
|
||||
ensureGreen("test");
|
||||
|
|
|
@@ -819,7 +819,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
        assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0));
        assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d));
        assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) 1332374400000L));
        assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) 1L));
        assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) true));
        assertThat(searchResponse.getHits().getAt(0).fields().get("text_field").value(), equalTo("foo"));
        assertThat(searchResponse.getHits().getAt(0).fields().get("keyword_field").value(), equalTo("foo"));
    }
|
||||
|
|
|
@ -535,13 +535,10 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
searchResponse = client().prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get();
|
||||
assertHitCount(searchResponse, 1L);
|
||||
|
||||
try {
|
||||
client().prepareSearch().setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get();
|
||||
fail("expected SearchPhaseExecutionException (total failure)");
|
||||
} catch (SearchPhaseExecutionException e) {
|
||||
assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
|
||||
assertThat(e.toString(), containsString("unit [D] not supported for date math"));
|
||||
}
|
||||
SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch()
|
||||
.setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get());
|
||||
assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
|
||||
assertThat(e.toString(), containsString("unit [D] not supported for date math"));
|
||||
}
|
||||
|
||||
// Issue #7880
|
||||
|
@ -776,12 +773,7 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
searchResponse = client().prepareSearch().setQuery(matchQuery("double", "2")).get();
|
||||
assertHitCount(searchResponse, 1L);
|
||||
assertFirstHit(searchResponse, hasId("2"));
|
||||
try {
|
||||
client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get();
|
||||
fail("SearchPhaseExecutionException should have been thrown");
|
||||
} catch (SearchPhaseExecutionException ex) {
|
||||
// number format exception
|
||||
}
|
||||
expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get());
|
||||
}
|
||||
|
||||
public void testMultiMatchQuery() throws Exception {
|
||||
|
@ -1777,15 +1769,11 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
refresh();
|
||||
|
||||
//has_child fails if executed on "simple" index
|
||||
try {
|
||||
client().prepareSearch("simple")
|
||||
.setQuery(hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get();
|
||||
fail("Should have failed as has_child query can only be executed against parent-child types");
|
||||
} catch (SearchPhaseExecutionException e) {
|
||||
assertThat(e.shardFailures().length, greaterThan(0));
|
||||
for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
|
||||
assertThat(shardSearchFailure.reason(), containsString("no mapping found for type [child]"));
|
||||
}
|
||||
SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class,
|
||||
() -> client().prepareSearch("simple").setQuery(hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get());
|
||||
assertThat(e.shardFailures().length, greaterThan(0));
|
||||
for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
|
||||
assertThat(shardSearchFailure.reason(), containsString("no mapping found for type [child]"));
|
||||
}
|
||||
|
||||
//has_child doesn't get parsed for "simple" index
|
||||
|
@ -1983,14 +1971,10 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
|
||||
|
||||
// When we use long values, it means we have ms since epoch UTC based so we don't apply any transformation
|
||||
try {
|
||||
Exception e = expectThrows(SearchPhaseExecutionException.class, () ->
|
||||
client().prepareSearch("test")
|
||||
.setQuery(QueryBuilders.rangeQuery("date").from(1388534400000L).to(1388537940999L).timeZone("+01:00"))
|
||||
.get();
|
||||
fail("A Range Filter using ms since epoch with a TimeZone should raise a ParsingException");
|
||||
} catch (SearchPhaseExecutionException e) {
|
||||
// We expect it
|
||||
}
|
||||
.get());
|
||||
|
||||
searchResponse = client().prepareSearch("test")
|
||||
.setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00"))
|
||||
|
@ -2005,14 +1989,10 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
assertThat(searchResponse.getHits().getAt(0).getId(), is("4"));
|
||||
|
||||
// A Range Filter on a numeric field with a TimeZone should raise an exception
|
||||
try {
|
||||
e = expectThrows(SearchPhaseExecutionException.class, () ->
|
||||
client().prepareSearch("test")
|
||||
.setQuery(QueryBuilders.rangeQuery("num").from("0").to("4").timeZone("-01:00"))
|
||||
.get();
|
||||
fail("A Range Filter on a numeric field with a TimeZone should raise a ParsingException");
|
||||
} catch (SearchPhaseExecutionException e) {
|
||||
// We expect it
|
||||
}
|
||||
.get());
|
||||
}
|
||||
|
||||
public void testSearchEmptyDoc() {
|
||||
|
|
|
@@ -26,8 +26,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.DocValueFormat;
import org.junit.Rule;
import org.junit.rules.ExpectedException;

import java.io.IOException;

@@ -49,16 +47,12 @@ public class ScoreSortBuilderTests extends AbstractSortTestCase<ScoreSortBuilder
        return result;
    }

    @Rule
    public ExpectedException exceptionRule = ExpectedException.none();

    /**
     * test passing null to {@link ScoreSortBuilder#order(SortOrder)} is illegal
     */
    public void testIllegalOrder() {
        exceptionRule.expect(NullPointerException.class);
        exceptionRule.expectMessage("sort order cannot be null.");
        new ScoreSortBuilder().order(null);
        Exception e = expectThrows(NullPointerException.class, () -> new ScoreSortBuilder().order(null));
        assertEquals("sort order cannot be null.", e.getMessage());
    }
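    // A sketch (not part of the commit) of the non-null path, assuming SortBuilder exposes an
    // order() getter alongside the order(SortOrder) setter used above.
    public void testValidOrderSketch() {
        ScoreSortBuilder builder = new ScoreSortBuilder().order(SortOrder.DESC);
        assertEquals(SortOrder.DESC, builder.order());
    }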
|
||||
|
||||
/**
|
||||
|
|
|
@ -20,16 +20,11 @@
|
|||
package org.elasticsearch.search.sort;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.Rule;
|
||||
import org.junit.rules.ExpectedException;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
public class SortModeTests extends ESTestCase {
|
||||
|
||||
@Rule
|
||||
public ExpectedException exceptionRule = ExpectedException.none();
|
||||
|
||||
public void testSortMode() {
|
||||
// we rely on these ordinals in serialization, so changing them breaks bwc.
|
||||
assertEquals(0, SortMode.MIN.ordinal());
|
||||
|
@ -50,16 +45,11 @@ public class SortModeTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testParseNull() {
|
||||
exceptionRule.expect(NullPointerException.class);
|
||||
exceptionRule.expectMessage("input string is null");
|
||||
SortMode.fromString(null);
|
||||
}
|
||||
public void testParsingFromStringExceptions() {
|
||||
Exception e = expectThrows(NullPointerException.class, () -> SortMode.fromString(null));
|
||||
assertEquals("input string is null", e.getMessage());
|
||||
|
||||
public void testIllegalArgument() {
|
||||
exceptionRule.expect(IllegalArgumentException.class);
|
||||
exceptionRule.expectMessage("Unknown SortMode [xyz]");
|
||||
SortMode.fromString("xyz");
|
||||
e = expectThrows(IllegalArgumentException.class, () -> SortMode.fromString("xyz"));
|
||||
assertEquals("Unknown SortMode [xyz]", e.getMessage());
|
||||
}
|
||||
|
||||
}
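For contrast with the parse failures above, a minimal sketch (not part of the commit) of accepted input, assuming SortMode keeps its MIN/MAX/SUM/AVG/MEDIAN constants and parses the lowercase names used in the search DSL:

    assertEquals(SortMode.MIN, SortMode.fromString("min"));
    assertEquals(SortMode.MEDIAN, SortMode.fromString("median"));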
|
||||
|
|
|
@ -44,7 +44,6 @@ import org.elasticsearch.common.Priority;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.zen.ElectMasterService;
|
||||
import org.elasticsearch.discovery.zen.ZenDiscovery;
|
||||
import org.elasticsearch.index.store.IndexStore;
|
||||
|
@ -59,7 +58,6 @@ import org.elasticsearch.rest.RestResponse;
|
|||
import org.elasticsearch.rest.action.admin.cluster.RestClusterStateAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction;
|
||||
import org.elasticsearch.snapshots.mockstore.MockRepository;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
import org.elasticsearch.test.ESIntegTestCase.Scope;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
|
@ -93,13 +91,6 @@ import static org.hamcrest.Matchers.nullValue;
|
|||
@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
|
||||
public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
|
||||
|
||||
@Override
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
|
||||
// TODO only restorePersistentSettingsTest needs this maybe factor out?
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Arrays.asList(MockRepository.Plugin.class);
|
||||
|
|
|
@ -57,9 +57,9 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.IndexStore;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.indices.InvalidIndexNameException;
|
||||
import org.elasticsearch.repositories.IndexId;
|
||||
import org.elasticsearch.repositories.RepositoriesService;
|
||||
|
@ -693,7 +693,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
|
|||
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(Settings.builder().put("location", repositoryLocation)));
|
||||
|
||||
prepareCreate("test-idx").setSettings(Settings.builder().put("index.allocation.max_retries", Integer.MAX_VALUE)).get();
|
||||
createIndex("test-idx");
|
||||
ensureGreen();
|
||||
|
||||
logger.info("--> indexing some data");
|
||||
|
|
|
@ -33,19 +33,15 @@ import org.elasticsearch.common.UUIDs;
|
|||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.network.NetworkModule;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.discovery.MasterNotDiscoveredException;
|
||||
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
import org.elasticsearch.test.NodeConfigurationSource;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
|
@ -54,7 +50,6 @@ import java.io.IOException;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.function.Consumer;
|
||||
|
@ -66,6 +61,7 @@ import java.util.stream.StreamSupport;
|
|||
import static java.util.stream.Collectors.toSet;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
|
@ -195,22 +191,12 @@ public class TribeIT extends ESIntegTestCase {
|
|||
settings.put(Node.NODE_MASTER_SETTING.getKey(), true);
|
||||
settings.put(NetworkModule.HTTP_ENABLED.getKey(), false);
|
||||
settings.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME);
|
||||
settings.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local");
|
||||
|
||||
doWithAllClusters(filter, c -> {
|
||||
String tribeSetting = "tribe." + c.getClusterName() + ".";
|
||||
settings.put(tribeSetting + ClusterName.CLUSTER_NAME_SETTING.getKey(), c.getClusterName());
|
||||
settings.put(tribeSetting + DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "100ms");
|
||||
settings.put(tribeSetting + NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME);
|
||||
settings.put(tribeSetting + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local");
|
||||
|
||||
Set<String> hosts = new HashSet<>();
|
||||
for (Transport transport : c.getInstances(Transport.class)) {
|
||||
TransportAddress address = transport.boundAddress().publishAddress();
|
||||
hosts.add(address.getAddress() + ":" + address.getPort());
|
||||
}
|
||||
settings.putArray(tribeSetting + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(),
|
||||
hosts.toArray(new String[hosts.size()]));
|
||||
});
|
||||
|
||||
return settings;
|
||||
|
@ -497,7 +483,7 @@ public class TribeIT extends ESIntegTestCase {
|
|||
assertBusy(() -> {
|
||||
ClusterState state = client().admin().cluster().prepareState().setNodes(true).get().getState();
|
||||
Set<String> nodes = StreamSupport.stream(state.getNodes().spliterator(), false).map(DiscoveryNode::getName).collect(toSet());
|
||||
assertThat(nodes.containsAll(expectedNodes), is(true));
|
||||
assertThat(nodes, containsInAnyOrder(expectedNodes.toArray()));
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
{
|
||||
"mappings": {
|
||||
"_default_": {
|
||||
"_all": {
|
||||
"norms": false
|
||||
},
|
||||
"_meta": {
|
||||
"version": "5.0.0-beta1"
|
||||
},
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"fields": {
|
||||
"mapping": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"match_mapping_type": "string",
|
||||
"path_match": "fields.*"
|
||||
}
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"@timestamp": {
|
||||
"type": "date"
|
||||
},
|
||||
"beat": {
|
||||
"properties": {
|
||||
"hostname": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"name": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"input_type": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"message": {
|
||||
"norms": false,
|
||||
"type": "text"
|
||||
},
|
||||
"offset": {
|
||||
"type": "long"
|
||||
},
|
||||
"source": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"tags": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"type": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"order": 0,
|
||||
"settings": {
|
||||
"index.refresh_interval": "5s"
|
||||
},
|
||||
"template": "filebeat-*"
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
{
|
||||
"template" : "logstash-*",
|
||||
"settings" : {
|
||||
"index.refresh_interval" : "5s"
|
||||
},
|
||||
"mappings" : {
|
||||
"_default_" : {
|
||||
"_all" : {"enabled" : true, "norms" : false},
|
||||
"dynamic_templates" : [ {
|
||||
"message_field" : {
|
||||
"match" : "message",
|
||||
"match_mapping_type" : "string",
|
||||
"mapping" : {
|
||||
"type" : "string", "index" : "analyzed", "norms" : false,
|
||||
"fielddata" : { "format" : "disabled" }
|
||||
}
|
||||
}
|
||||
}, {
|
||||
"string_fields" : {
|
||||
"match" : "*",
|
||||
"match_mapping_type" : "string",
|
||||
"mapping" : {
|
||||
"type" : "text", "norms" : false,
|
||||
"fields" : {
|
||||
"keyword" : { "type": "keyword" }
|
||||
}
|
||||
}
|
||||
}
|
||||
} ],
|
||||
"properties" : {
|
||||
"@timestamp": { "type": "date", "include_in_all": false },
|
||||
"@version": { "type": "keyword", "include_in_all": false },
|
||||
"geoip" : {
|
||||
"dynamic": true,
|
||||
"properties" : {
|
||||
"ip": { "type": "ip" },
|
||||
"location" : { "type" : "geo_point" },
|
||||
"latitude" : { "type" : "half_float" },
|
||||
"longitude" : { "type" : "half_float" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,162 @@
|
|||
{
|
||||
"mappings": {
|
||||
"_default_": {
|
||||
"_all": {
|
||||
"norms": false
|
||||
},
|
||||
"_meta": {
|
||||
"version": "5.0.0-beta1"
|
||||
},
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"fields": {
|
||||
"mapping": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"match_mapping_type": "string",
|
||||
"path_match": "fields.*"
|
||||
}
|
||||
},
|
||||
{
|
||||
"event_data": {
|
||||
"mapping": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"match_mapping_type": "string",
|
||||
"path_match": "event_data.*"
|
||||
}
|
||||
},
|
||||
{
|
||||
"user_data": {
|
||||
"mapping": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"match_mapping_type": "string",
|
||||
"path_match": "user_data.*"
|
||||
}
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"@timestamp": {
|
||||
"type": "date"
|
||||
},
|
||||
"activity_id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"beat": {
|
||||
"properties": {
|
||||
"hostname": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"name": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"computer_name": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"event_id": {
|
||||
"type": "long"
|
||||
},
|
||||
"keywords": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"level": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"log_name": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"message": {
|
||||
"norms": false,
|
||||
"type": "text"
|
||||
},
|
||||
"message_error": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"opcode": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"process_id": {
|
||||
"type": "long"
|
||||
},
|
||||
"provider_guid": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"record_number": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"related_activity_id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"source_name": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"tags": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"task": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"thread_id": {
|
||||
"type": "long"
|
||||
},
|
||||
"type": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"user": {
|
||||
"properties": {
|
||||
"domain": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"identifier": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"name": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"type": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": {
|
||||
"type": "long"
|
||||
},
|
||||
"xml": {
|
||||
"norms": false,
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"order": 0,
|
||||
"settings": {
|
||||
"index.refresh_interval": "5s"
|
||||
},
|
||||
"template": "winlogbeat-*"
|
||||
}
|
|
@ -1,417 +0,0 @@
|
|||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
# Prepare a release
|
||||
#
|
||||
# 1. Update the Version.java to remove the snapshot bit
|
||||
# 2. Remove the -SNAPSHOT suffix in all pom.xml files
|
||||
#
|
||||
# USAGE:
|
||||
#
|
||||
# python3 ./dev-tools/prepare-release.py
|
||||
#
|
||||
# Note: Ensure the script is run from the elasticsearch top level directory
|
||||
#
|
||||
|
||||
import fnmatch
|
||||
import argparse
|
||||
from prepare_release_update_documentation import update_reference_docs
|
||||
import subprocess
|
||||
import tempfile
|
||||
import re
|
||||
import os
|
||||
import shutil
|
||||
from functools import partial
|
||||
import sys
|
||||
|
||||
VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java'
|
||||
POM_FILE = 'pom.xml'
|
||||
MAIL_TEMPLATE = """
|
||||
Hi all
|
||||
|
||||
The new release candidate for %(version)s is now available, including the x-plugins and RPM/deb repos. This release is based on:
|
||||
|
||||
* Elasticsearch commit: %(hash)s - https://github.com/elastic/elasticsearch/commit/%(hash)s
|
||||
* X-Plugins commit: FILL_IN_X-PLUGINS_HASH - https://github.com/elastic/x-plugins/commit/FILL_IN_X-PLUGINS_HASH
|
||||
|
||||
The packages may be downloaded from the following URLs:
|
||||
|
||||
* ZIP - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip
|
||||
* tar.gz - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz
|
||||
* RPM - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm
|
||||
* deb - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb
|
||||
|
||||
Plugins can be installed as follows:
|
||||
|
||||
ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install cloud-aws
|
||||
|
||||
The same goes for the x-plugins:
|
||||
|
||||
ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install license
|
||||
ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install marvel-agent
|
||||
ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install shield
|
||||
ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install watcher
|
||||
|
||||
To install the deb from an APT repo:
|
||||
|
||||
APT line sources.list line:
|
||||
|
||||
deb http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/debian/ stable main
|
||||
|
||||
To install the RPM, create a YUM file like:
|
||||
|
||||
/etc/yum.repos.d/elasticsearch.repo
|
||||
|
||||
containing:
|
||||
|
||||
[elasticsearch-2.0]
|
||||
name=Elasticsearch repository for packages
|
||||
baseurl=http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/centos
|
||||
gpgcheck=1
|
||||
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
|
||||
enabled=1
|
||||
|
||||
To smoke-test the release please run:
|
||||
|
||||
python3 -B ./dev-tools/smoke_test_rc.py --version %(version)s --hash %(hash)s --plugins license,shield,watcher
|
||||
|
||||
NOTE: this script requires JAVA_HOME to point to a Java 7 Runtime
|
||||
|
||||
"""
|
||||
|
||||
# console colors
|
||||
COLOR_OK = '\033[92m'
|
||||
COLOR_END = '\033[0m'
|
||||
COLOR_FAIL = '\033[91m'
|
||||
|
||||
def run(command, env_vars=None):
|
||||
if env_vars:
|
||||
for key, value in env_vars.items():
|
||||
os.putenv(key, value)
|
||||
print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END))
|
||||
if os.system(command):
|
||||
raise RuntimeError(' FAILED: %s' % (command))
|
||||
|
||||
def ensure_checkout_is_clean():
|
||||
# Make sure no local mods:
|
||||
s = subprocess.check_output('git diff --shortstat', shell=True).decode('utf-8')
|
||||
if len(s) > 0:
|
||||
raise RuntimeError('git diff --shortstat is non-empty got:\n%s' % s)
|
||||
|
||||
# Make sure no untracked files:
|
||||
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
|
||||
if 'Untracked files:' in s:
|
||||
if 'dev-tools/__pycache__/' in s:
|
||||
print('*** NOTE: invoke python with -B to prevent __pycache__ directories ***')
|
||||
raise RuntimeError('git status shows untracked files got:\n%s' % s)
|
||||
|
||||
# Make sure we have all changes from origin:
|
||||
if 'is behind' in s:
|
||||
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch got:\n%s' % (s))
|
||||
|
||||
# Make sure we no local unpushed changes (this is supposed to be a clean area):
|
||||
if 'is ahead' in s:
|
||||
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch got:\n%s' % (s))
|
||||
|
||||
# Reads the given file and applies the
|
||||
# callback to it. If the callback changed
|
||||
# a line the given file is replaced with
|
||||
# the modified input.
|
||||
def process_file(file_path, line_callback):
|
||||
fh, abs_path = tempfile.mkstemp()
|
||||
modified = False
|
||||
with open(abs_path,'w', encoding='utf-8') as new_file:
|
||||
with open(file_path, encoding='utf-8') as old_file:
|
||||
for line in old_file:
|
||||
new_line = line_callback(line)
|
||||
modified = modified or (new_line != line)
|
||||
new_file.write(new_line)
|
||||
os.close(fh)
|
||||
if modified:
|
||||
#Remove original file
|
||||
os.remove(file_path)
|
||||
#Move new file
|
||||
shutil.move(abs_path, file_path)
|
||||
return True
|
||||
else:
|
||||
# nothing to do - just remove the tmp file
|
||||
os.remove(abs_path)
|
||||
return False
|
||||
|
||||
# Moves the Version.java file from a snapshot to a release
|
||||
def remove_version_snapshot(version_file, release):
|
||||
# 1.0.0.Beta1 -> 1_0_0_Beta1
|
||||
release = release.replace('.', '_')
|
||||
release = release.replace('-', '_')
|
||||
pattern = 'new Version(V_%s_ID, true' % (release)
|
||||
replacement = 'new Version(V_%s_ID, false' % (release)
|
||||
def callback(line):
|
||||
return line.replace(pattern, replacement)
|
||||
processed = process_file(version_file, callback)
|
||||
if not processed:
|
||||
raise RuntimeError('failed to remove snapshot version for %s' % (release))
|
||||
|
||||
def rename_local_meta_files(path):
|
||||
for root, _, file_names in os.walk(path):
|
||||
for file_name in fnmatch.filter(file_names, 'maven-metadata-local.xml*'):
|
||||
full_path = os.path.join(root, file_name)
|
||||
os.rename(full_path, os.path.join(root, file_name.replace('-local', '')))
|
||||
|
||||
# Checks the pom.xml for the release version.
|
||||
# This method fails if the pom file has no SNAPSHOT version set ie.
|
||||
# if the version is already on a release version we fail.
|
||||
# Returns the next version string ie. 0.90.7
|
||||
def find_release_version():
|
||||
with open('pom.xml', encoding='utf-8') as file:
|
||||
for line in file:
|
||||
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
|
||||
if match:
|
||||
return match.group(1)
|
||||
raise RuntimeError('Could not find release version in branch')
|
||||
|
||||
# Checks if the produced RPM is signed with the supplied GPG key
|
||||
def ensure_rpm_is_signed(rpm, gpg_key):
|
||||
rpm_check_signature_cmd = 'rpm -v -K %s | grep -qi %s' % (rpm, gpg_key)
|
||||
try:
|
||||
subprocess.check_output(rpm_check_signature_cmd, shell=True)
|
||||
except:
|
||||
raise RuntimeError('Aborting. RPM does not seem to be signed, check with: rpm -v -K %s' % rpm)
|
||||
|
||||
# Checks if a command exists, needed for external binaries
|
||||
def check_command_exists(name, cmd):
|
||||
try:
|
||||
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
raise RuntimeError('Could not run command %s - please make sure it is installed and in $PATH' % (name))
|
||||
|
||||
def run_and_print(text, run_function):
|
||||
try:
|
||||
print(text, end='')
|
||||
run_function()
|
||||
print(COLOR_OK + 'OK' + COLOR_END)
|
||||
return True
|
||||
except RuntimeError:
|
||||
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
|
||||
return False
|
||||
|
||||
def check_env_var(text, env_var):
|
||||
try:
|
||||
print(text, end='')
|
||||
os.environ[env_var]
|
||||
print(COLOR_OK + 'OK' + COLOR_END)
|
||||
return True
|
||||
except KeyError:
|
||||
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
|
||||
return False
|
||||
|
||||
def check_environment_and_commandline_tools(check_only):
|
||||
checks = list()
|
||||
checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_KEY... ', 'AWS_SECRET_KEY'))
|
||||
checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY... ', 'AWS_ACCESS_KEY'))
|
||||
checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version')))
|
||||
checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version')))
|
||||
checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version')))
|
||||
checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v')))
|
||||
checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version')))
|
||||
checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version')))
|
||||
checks.append(run_and_print('Checking command: deb-s3... ', partial(check_command_exists, 'deb-s3', 'deb-s3 -h')))
|
||||
checks.append(run_and_print('Checking command: rpm-s3... ', partial(check_command_exists, 'rpm-s3', 'rpm-s3 -h')))
|
||||
|
||||
if check_only:
|
||||
sys.exit(0)
|
||||
|
||||
if False in checks:
|
||||
print("Exiting due to failing checks")
|
||||
sys.exit(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release')
|
||||
parser.add_argument('--deploy-sonatype', dest='deploy_sonatype', action='store_true',
|
||||
help='Installs and Deploys the release on a sonatype staging repository.')
|
||||
parser.add_argument('--deploy-s3', dest='deploy_s3', action='store_true',
|
||||
help='Pushes artifacts to the S3 staging area')
|
||||
parser.add_argument('--deploy-s3-repos', dest='deploy_s3_repos', action='store_true',
|
||||
help='Creates package repositories in S3 repo')
|
||||
parser.add_argument('--no-install', dest='no_install', action='store_true',
|
||||
help='Does not run "mvn install", expects this to be run already and reuses artifacts from local repo, only useful with --deploy-s3/--deploy-s3-repos, after sonatype deplomeny to ensure same artifacts')
|
||||
parser.add_argument('--skip-doc-check', dest='skip_doc_check', action='store_false',
|
||||
help='Skips any checks for pending documentation changes')
|
||||
parser.add_argument('--skip-tests', dest='skip_tests', action='store_true',
|
||||
help='Skips any test runs')
|
||||
parser.add_argument('--gpg-key', dest='gpg_key', default="D88E42B4",
|
||||
help='Allows you to specify a different gpg_key to be used instead of the default release key')
|
||||
parser.add_argument('--bucket', '-b', dest='bucket', default="download.elasticsearch.org",
|
||||
help='Allows you to specify a different s3 bucket to upload the artifacts to')
|
||||
parser.add_argument('--quiet', dest='quiet', action='store_true',
|
||||
help='Runs the script in quiet mode')
|
||||
parser.add_argument('--check', dest='check', action='store_true',
|
||||
help='Checks and reports for all requirements and then exits')
|
||||
|
||||
# by default, we only run mvn install and don't push anything repo
|
||||
parser.set_defaults(deploy_sonatype=False)
|
||||
parser.set_defaults(deploy_s3=False)
|
||||
parser.set_defaults(deploy_s3_repos=False)
|
||||
parser.set_defaults(no_install=False)
|
||||
# other defaults
|
||||
parser.set_defaults(skip_doc_check=False)
|
||||
parser.set_defaults(quiet=False)
|
||||
parser.set_defaults(skip_tests=False)
|
||||
|
||||
args = parser.parse_args()
|
||||
skip_doc_check = args.skip_doc_check
|
||||
gpg_key = args.gpg_key
|
||||
bucket = args.bucket
|
||||
deploy_sonatype = args.deploy_sonatype
|
||||
deploy_s3 = args.deploy_s3
|
||||
deploy_s3_repos = args.deploy_s3_repos
|
||||
run_mvn_install = not args.no_install
|
||||
skip_tests = args.skip_tests
|
||||
|
||||
check_environment_and_commandline_tools(args.check)
|
||||
|
||||
if not run_mvn_install and deploy_sonatype:
|
||||
print('Using --no-install and --deploy-sonatype together does not work. Exiting')
|
||||
sys.exit(-1)
|
||||
|
||||
print('*** Preparing a release candidate: ', end='')
|
||||
print('deploy sonatype: %s%s%s' % (COLOR_OK if deploy_sonatype else COLOR_FAIL, 'yes' if deploy_sonatype else 'no', COLOR_END), end='')
|
||||
print(', deploy s3: %s%s%s' % (COLOR_OK if deploy_s3 else COLOR_FAIL, 'yes' if deploy_s3 else 'no', COLOR_END), end='')
|
||||
print(', deploy s3 repos: %s%s%s' % (COLOR_OK if deploy_s3_repos else COLOR_FAIL, 'yes' if deploy_s3_repos else 'no', COLOR_END), end='')
|
||||
print('')
|
||||
|
||||
shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8')
|
||||
releaseDirectory = os.getenv('HOME') + '/elastic-releases'
|
||||
release_version = find_release_version()
|
||||
localRepo = '%s/elasticsearch-%s-%s' % (releaseDirectory, release_version, shortHash)
|
||||
localRepoElasticsearch = localRepo + '/org/elasticsearch'
|
||||
|
||||
ensure_checkout_is_clean()
|
||||
if not re.match('(\d+\.\d+)\.*',release_version):
|
||||
raise RuntimeError('illegal release version format: %s' % (release_version))
|
||||
package_repo_version = '%s.x' % re.match('(\d+)\.*', release_version).group(1)
|
||||
|
||||
print('*** Preparing release version: [%s]' % release_version)
|
||||
|
||||
if not skip_doc_check:
|
||||
print('*** Check for pending documentation changes')
|
||||
pending_files = update_reference_docs(release_version)
|
||||
if pending_files:
|
||||
raise RuntimeError('pending coming[%s] documentation changes found in %s' % (release_version, pending_files))
|
||||
|
||||
run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
|
||||
run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
|
||||
run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
|
||||
|
||||
remove_version_snapshot(VERSION_FILE, release_version)
|
||||
|
||||
print('*** Done removing snapshot version. DO NOT COMMIT THIS, WHEN CREATING A RELEASE CANDIDATE.')
|
||||
|
||||
if not os.path.exists(releaseDirectory):
|
||||
os.mkdir(releaseDirectory)
|
||||
if os.path.exists(localRepoElasticsearch) and run_mvn_install:
|
||||
print('clean local repository %s' % localRepoElasticsearch)
|
||||
shutil.rmtree(localRepoElasticsearch)
|
||||
|
||||
mvn_target = 'deploy' if deploy_sonatype else 'install'
|
||||
tests = '-DskipTests' if skip_tests else '-Dskip.integ.tests=true'
|
||||
install_command = 'mvn clean %s -Prelease %s -Dgpg.key="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, tests, gpg_key, localRepo)
|
||||
clean_repo_command = 'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch)
|
||||
|
||||
if not run_mvn_install:
|
||||
print('')
|
||||
print('*** By choosing --no-install we assume you ran the following commands successfully:')
|
||||
print(' %s' % (install_command))
|
||||
print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command))
|
||||
rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch)
|
||||
print(' 2. Rename all maven metadata files: %s' % (rename_metadata_files_command))
|
||||
else:
|
||||
for cmd in [install_command, clean_repo_command]:
|
||||
run(cmd)
|
||||
rename_local_meta_files(localRepoElasticsearch)
|
||||
|
||||
rpm = '%s/distribution/rpm/elasticsearch/%s/elasticsearch-%s.rpm' % (localRepoElasticsearch, release_version, release_version)
|
||||
print('Ensuring that RPM has been signed')
|
||||
ensure_rpm_is_signed(rpm, gpg_key)
|
||||
|
||||
# repository push commands
|
||||
s3cmd_sync_to_staging_bucket_cmd = 's3cmd sync -P %s s3://%s/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, bucket, release_version, shortHash)
|
||||
s3_bucket_sync_to = '%s/elasticsearch/staging/%s-%s/repos/' % (bucket, release_version, shortHash)
|
||||
s3cmd_sync_official_repo_cmd = 's3cmd sync s3://packages.elasticsearch.org/elasticsearch/%s s3://%s' % (package_repo_version, s3_bucket_sync_to)
|
||||
|
||||
debs3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/debian' % (release_version, shortHash, package_repo_version)
|
||||
debs3_upload_cmd = 'deb-s3 upload --preserve-versions %s/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb -b %s --prefix %s --sign %s --arch amd64' % (localRepoElasticsearch, release_version, release_version, bucket, debs3_prefix, gpg_key)
|
||||
debs3_list_cmd = 'deb-s3 list -b %s --prefix %s' % (bucket, debs3_prefix)
|
||||
debs3_verify_cmd = 'deb-s3 verify -b %s --prefix %s' % (bucket, debs3_prefix)
|
||||
rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/centos' % (release_version, shortHash, package_repo_version)
|
||||
# external-1 is the alias name for the us-east-1 region. This is used by rpm-s3 to construct the hostname
|
||||
rpms3_upload_cmd = 'rpm-s3 -v -b %s -p %s --sign --visibility public-read -k 100 %s -r external-1' % (bucket, rpms3_prefix, rpm)
|
||||
|
||||
if deploy_s3:
|
||||
run(s3cmd_sync_to_staging_bucket_cmd)
|
||||
else:
|
||||
print('')
|
||||
print('*** To push a release candidate to s3 run: ')
|
||||
print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch))
|
||||
print (' %s' % (s3cmd_sync_to_staging_bucket_cmd))
|
||||
|
||||
if deploy_s3_repos:
|
||||
print('*** Syncing official package repository into staging s3 bucket')
|
||||
run(s3cmd_sync_official_repo_cmd)
|
||||
print('*** Uploading debian package (you will be prompted for the passphrase!)')
|
||||
run(debs3_upload_cmd)
|
||||
run(debs3_list_cmd)
|
||||
run(debs3_verify_cmd)
|
||||
print('*** Uploading rpm package (you will be prompted for the passphrase!)')
|
||||
run(rpms3_upload_cmd)
|
||||
else:
|
||||
print('*** To create repositories on S3 run:')
|
||||
print(' 1. Sync existing repo into staging: %s' % s3cmd_sync_official_repo_cmd)
|
||||
print(' 2. Upload debian package (and sign it): %s' % debs3_upload_cmd)
|
||||
print(' 3. List all debian packages: %s' % debs3_list_cmd)
|
||||
print(' 4. Verify debian packages: %s' % debs3_verify_cmd)
|
||||
print(' 5. Upload RPM: %s' % rpms3_upload_cmd)
|
||||
print('')
|
||||
print('NOTE: the above mvn command will promt you several times for the GPG passphrase of the key you specified you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase')
|
||||
print(' since RPM signing doesn\'t support gpg-agents the recommended way to set the password is to add a release profile to your settings.xml:')
|
||||
print("""
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>release</id>
|
||||
<properties>
|
||||
<gpg.passphrase>YourPasswordGoesHere</gpg.passphrase>
|
||||
</properties>
|
||||
</profile>
|
||||
</profiles>
|
||||
""")
|
||||
print('NOTE: Running s3cmd might require you to create a config file with your credentials, if the s3cmd does not support suppliying them via the command line!')
|
||||
|
||||
print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:')
|
||||
string_format_dict = {'version' : release_version, 'hash': shortHash, 'package_repo_version' : package_repo_version, 'bucket': bucket}
|
||||
print(MAIL_TEMPLATE % string_format_dict)
|
||||
|
||||
print('')
|
||||
print('You can verify that pushing to the staging repository pushed all the artifacts by running (log into sonatype to find out the correct id):')
|
||||
print(' python -B dev-tools/validate-maven-repository.py %s https://oss.sonatype.org/service/local/repositories/orgelasticsearch-IDTOFINDOUT/content/org/elasticsearch ' %(localRepoElasticsearch))
|
||||
|
||||
print('')
|
||||
print('To publish the release and the repo on S3 execute the following commands:')
|
||||
print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(package_repo_version)s' % string_format_dict)
|
||||
print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/ s3://%(bucket)s/elasticsearch/release/org' % string_format_dict)
|
||||
print('Now go ahead and tag the release:')
|
||||
print(' git tag -a v%(version)s %(hash)s' % string_format_dict)
|
||||
print(' git push origin v%(version)s' % string_format_dict )
|
||||
|
||||
|
|
@ -1,130 +0,0 @@
|
|||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
# Helper python script to check if a sonatype staging repo contains
|
||||
# all the required files compared to a local repository
|
||||
#
|
||||
# The script does the following steps
|
||||
#
|
||||
# 1. Scans the local maven repo for all files in /org/elasticsearch
|
||||
# 2. Opens a HTTP connection to the staging repo
|
||||
# 3. Executes a HEAD request for each file found in step one
|
||||
# 4. Compares the content-length response header with the real file size
|
||||
# 5. Return an error if those two numbers differ
|
||||
#
|
||||
# A pre requirement to run this, is to find out via the oss.sonatype.org web UI, how that repo is named
|
||||
# - After logging in you go to 'Staging repositories' and search for the one you just created
|
||||
# - Click into the `Content` tab
|
||||
# - Open any artifact (not a directory)
|
||||
# - Copy the link of `Repository Path` on the right and reuse that part of the URL
|
||||
#
|
||||
# Alternatively you can just use the name of the repository and reuse the rest (ie. the repository
|
||||
# named for the example below would have been named orgelasticsearch-1012)
|
||||
#
|
||||
#
|
||||
# Example call
|
||||
# python dev-tools/validate-maven-repository.py /path/to/repo/org/elasticsearch/ \
|
||||
# https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch
|
||||
|
||||
import sys
|
||||
import os
|
||||
import httplib
|
||||
import urlparse
|
||||
import re
|
||||
|
||||
# Draw a simple progress bar, a couple of hundred HEAD requests might take a while
|
||||
# Note, when drawing this, it uses the carriage return character, so you should not
|
||||
# write anything in between
|
||||
def drawProgressBar(percent, barLen = 40):
|
||||
sys.stdout.write("\r")
|
||||
progress = ""
|
||||
for i in range(barLen):
|
||||
if i < int(barLen * percent):
|
||||
progress += "="
|
||||
else:
|
||||
progress += " "
|
||||
sys.stdout.write("[ %s ] %.2f%%" % (progress, percent * 100))
|
||||
sys.stdout.flush()
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) != 3:
|
||||
print 'Usage: %s <localRep> <stagingRepo> [user:pass]' % (sys.argv[0])
|
||||
print ''
|
||||
print 'Example: %s /tmp/my-maven-repo/org/elasticsearch https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch' % (sys.argv[0])
|
||||
else:
|
||||
sys.argv[1] = re.sub('/$', '', sys.argv[1])
|
||||
sys.argv[2] = re.sub('/$', '', sys.argv[2])
|
||||
|
||||
localMavenRepo = sys.argv[1]
|
||||
endpoint = sys.argv[2]
|
||||
|
||||
filesToCheck = []
|
||||
foundSignedFiles = False
|
||||
|
||||
for root, dirs, files in os.walk(localMavenRepo):
|
||||
for file in files:
|
||||
# no metadata files (they get renamed from maven-metadata-local.xml to maven-metadata.xml while deploying)
|
||||
# no .properties and .repositories files (they don't get uploaded)
|
||||
if not file.startswith('maven-metadata') and not file.endswith('.properties') and not file.endswith('.repositories'):
|
||||
filesToCheck.append(os.path.join(root, file))
|
||||
if file.endswith('.asc'):
|
||||
foundSignedFiles = True
|
||||
|
||||
print "Need to check %i files" % len(filesToCheck)
|
||||
if not foundSignedFiles:
|
||||
print '### Warning: No signed .asc files found'
|
||||
|
||||
# set up http
|
||||
parsed_uri = urlparse.urlparse(endpoint)
|
||||
domain = parsed_uri.netloc
|
||||
if parsed_uri.scheme == 'https':
|
||||
conn = httplib.HTTPSConnection(domain)
|
||||
else:
|
||||
conn = httplib.HTTPConnection(domain)
|
||||
#conn.set_debuglevel(5)
|
||||
|
||||
drawProgressBar(0)
|
||||
errors = []
|
||||
for idx, file in enumerate(filesToCheck):
|
||||
request_uri = parsed_uri.path + file[len(localMavenRepo):]
|
||||
conn.request("HEAD", request_uri)
|
||||
res = conn.getresponse()
|
||||
res.read() # useless call for head, but prevents httplib.ResponseNotReady raise
|
||||
|
||||
absolute_url = parsed_uri.scheme + '://' + parsed_uri.netloc + request_uri
|
||||
if res.status == 200:
|
||||
content_length = res.getheader('content-length')
|
||||
local_file_size = os.path.getsize(file)
|
||||
if int(content_length) != int(local_file_size):
|
||||
errors.append('LENGTH MISMATCH: %s differs in size. local %s <=> %s remote' % (absolute_url, content_length, local_file_size))
|
||||
elif res.status == 404:
|
||||
errors.append('MISSING: %s' % absolute_url)
|
||||
elif res.status == 301 or res.status == 302:
|
||||
errors.append('REDIRECT: %s to %s' % (absolute_url, res.getheader('location')))
|
||||
else:
|
||||
errors.append('ERROR: %s http response: %s %s' %(absolute_url, res.status, res.reason))
|
||||
|
||||
# update progressbar at the end
|
||||
drawProgressBar((idx+1)/float(len(filesToCheck)))
|
||||
|
||||
print
|
||||
|
||||
if len(errors) != 0:
|
||||
print 'The following errors occurred (%s out of %s files)' % (len(errors), len(filesToCheck))
|
||||
print
|
||||
for error in errors:
|
||||
print error
|
||||
sys.exit(-1)
|
|
@ -178,7 +178,7 @@ integTest {
    configFile 'userdict_ja.txt'
    configFile 'KeywordTokenizer.rbbi'
    // Whitelist reindexing from the local node so we can test it.
    setting 'reindex.remote.whitelist', 'myself'
    setting 'reindex.remote.whitelist', '127.0.0.1:*'
  }
}
@ -81,12 +81,12 @@ documentation

[source,java]
--------------------------------------------------
SearchRequestBuilder srb1 = node.client()
SearchRequestBuilder srb1 = client
    .prepareSearch().setQuery(QueryBuilders.queryStringQuery("elasticsearch")).setSize(1);
SearchRequestBuilder srb2 = node.client()
SearchRequestBuilder srb2 = client
    .prepareSearch().setQuery(QueryBuilders.matchQuery("name", "kimchy")).setSize(1);

MultiSearchResponse sr = node.client().prepareMultiSearch()
MultiSearchResponse sr = client.prepareMultiSearch()
    .add(srb1)
    .add(srb2)
    .execute().actionGet();
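A natural follow-up that the page implies but the hunk does not show is consuming the multi-search response. A minimal sketch, assuming the `sr` variable from the snippet above:

[source,java]
--------------------------------------------------
// Each sub-request produces one item; failed items carry the failure instead of a response.
long totalHits = 0;
for (MultiSearchResponse.Item item : sr.getResponses()) {
    SearchResponse response = item.getResponse();
    totalHits += response.getHits().getTotalHits();
}
--------------------------------------------------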
@ -107,7 +107,7 @@ The following code shows how to add two aggregations within your search:

[source,java]
--------------------------------------------------
SearchResponse sr = node.client().prepareSearch()
SearchResponse sr = client.prepareSearch()
    .setQuery(QueryBuilders.matchAllQuery())
    .addAggregation(
        AggregationBuilders.terms("agg1").field("field")
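To make the aggregation example self-contained, here is a hedged sketch of reading the `terms` aggregation back from the response, assuming the `sr` variable above:

[source,java]
--------------------------------------------------
// Fetch the aggregation by the name it was registered under and walk its buckets.
Terms agg1 = sr.getAggregations().get("agg1");
for (Terms.Bucket bucket : agg1.getBuckets()) {
    String term = bucket.getKeyAsString();   // the field value for this bucket
    long docCount = bucket.getDocCount();    // how many documents fell into it
}
--------------------------------------------------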
@ -268,6 +268,4 @@ params:: Optional. An object whose contents will be passed as variable
    "_agg" : {}
}
--------------------------------------------------
reduce_params:: Optional. An object whose contents will be passed as variables to the `reduce_script`. This can be useful to allow the user to control
                the behavior of the reduce phase. If this is not specified the variable will be undefined in the reduce_script execution.
@ -219,6 +219,7 @@ Some examples are:
`2015-01-01||+1M/d`:: `2015-01-01` plus one month, rounded down to the nearest day.

[float]
[[common-options-response-filtering]]
=== Response Filtering

All REST APIs accept a `filter_path` parameter that can be used to reduce
@ -410,8 +410,8 @@ basic auth or the password will be sent in plain text.
Remote hosts have to be explicitly whitelisted in elasticsearch.yml using the
`reindex.remote.whitelist` property. It can be set to a comma-delimited list
of allowed remote `host` and `port` combinations (e.g.
`otherhost:9200, another:9200`). Scheme is ignored by the whitelist - only host
and port are used.
`otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*`). Scheme is
ignored by the whitelist - only host and port are used.

This feature should work with remote clusters of any version of Elasticsearch
you are likely to find. This should allow you to upgrade from any version of
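The whitelist is an ordinary node setting, so in embedded or test scenarios it can also be supplied programmatically. A minimal sketch, assuming `Settings.builder()` as used elsewhere in this commit:

[source,java]
--------------------------------------------------
// Equivalent of the elasticsearch.yml entry above, expressed as node settings in Java.
Settings nodeSettings = Settings.builder()
    .putArray("reindex.remote.whitelist", "otherhost:9200", "127.0.10.*:9200")
    .build();
--------------------------------------------------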
@ -24,7 +24,7 @@ index.search.slowlog.threshold.fetch.debug: 500ms
|
|||
index.search.slowlog.threshold.fetch.trace: 200ms
|
||||
--------------------------------------------------
|
||||
|
||||
All of the above settings are _dynamic_ and can be set per-index.
|
||||
All of the above settings are _dynamic_ and are set per-index.
|
||||
|
||||
By default, none are enabled (set to `-1`). Levels (`warn`, `info`,
|
||||
`debug`, `trace`) allow to control under which logging level the log
|
||||
|
@ -42,7 +42,7 @@ level.
|
|||
The logging file is configured by default using the following
|
||||
configuration (found in `log4j2.properties`):
|
||||
|
||||
[source,yaml]
|
||||
[source,properties]
|
||||
--------------------------------------------------
|
||||
appender.index_search_slowlog_rolling.type = RollingFile
|
||||
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
|
||||
|
@ -67,8 +67,8 @@ logger.index_search_slowlog_rolling.additivity = false
|
|||
|
||||
The indexing slow log, similar in functionality to the search slow
|
||||
log. The log file name ends with `_index_indexing_slowlog.log`. Log and
|
||||
the thresholds are configured in the elasticsearch.yml file in the same
|
||||
way as the search slowlog. Index slowlog sample:
|
||||
the thresholds are configured in the same way as the search slowlog.
|
||||
Index slowlog sample:
|
||||
|
||||
[source,yaml]
|
||||
--------------------------------------------------
|
||||
|
@ -80,23 +80,31 @@ index.indexing.slowlog.level: info
|
|||
index.indexing.slowlog.source: 1000
|
||||
--------------------------------------------------
|
||||
|
||||
All of the above settings are _dynamic_ and can be set per-index.
|
||||
All of the above settings are _dynamic_ and are set per-index.
|
||||
|
||||
By default Elasticsearch will log the first 1000 characters of the _source in
|
||||
the slowlog. You can change that with `index.indexing.slowlog.source`. Setting
|
||||
it to `false` or `0` will skip logging the source entirely and setting it to
|
||||
`true` will log the entire source regardless of size.
|
||||
|
||||
The index slow log file is configured by default in the `logging.yml`
|
||||
The index slow log file is configured by default in the `log4j2.properties`
|
||||
file:
|
||||
|
||||
[source,yaml]
|
||||
[source,properties]
|
||||
--------------------------------------------------
|
||||
index_indexing_slow_log_file:
|
||||
type: dailyRollingFile
|
||||
file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
|
||||
datePattern: "'.'yyyy-MM-dd"
|
||||
layout:
|
||||
type: pattern
|
||||
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
|
||||
appender.index_indexing_slowlog_rolling.type = RollingFile
|
||||
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
|
||||
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
|
||||
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
|
||||
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
|
||||
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
|
||||
appender.index_indexing_slowlog_rolling.policies.type = Policies
|
||||
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
|
||||
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
|
||||
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
|
||||
|
||||
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
|
||||
logger.index_indexing_slowlog.level = trace
|
||||
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
|
||||
logger.index_indexing_slowlog.additivity = false
|
||||
--------------------------------------------------
|
||||
|
|
|
@ -34,7 +34,7 @@ POST /logs_write/_rollover <2>
// TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/]
<1> Creates an index called `logs-000001` with the alias `logs_write`.
<2> If the index pointed to by `logs_write` was created 7 or more days ago, or
    contains 1,000 or more documents, then the `logs-0002` index is created
    contains 1,000 or more documents, then the `logs-000002` index is created
    and the `logs_write` alias is updated to point to `logs-000002`.

The above request might return the following response:
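For Java-client readers, the same rollover conditions could be expressed roughly as below. This is a sketch only; the `addMaxIndexAgeCondition`/`addMaxIndexDocsCondition` builder methods are assumed from the 5.x rollover API and should be checked against the client javadocs:

[source,java]
--------------------------------------------------
// Roll the alias over when the backing index is 7 days old or holds 1000+ documents.
RolloverResponse rollover = client.admin().indices()
    .prepareRolloverIndex("logs_write")
    .addMaxIndexAgeCondition(TimeValue.timeValueDays(7))
    .addMaxIndexDocsCondition(1000)
    .get();
--------------------------------------------------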
@ -181,3 +181,50 @@ for indices of that start with `te*`, source will still be enabled.
|
|||
Note, for mappings, the merging is "deep", meaning that specific
|
||||
object/property based mappings can easily be added/overridden on higher
|
||||
order templates, with lower order templates providing the basis.
|
||||
|
||||
[float]
|
||||
[[versioning-templates]]
|
||||
=== Template Versioning
|
||||
|
||||
Templates can optionally add a `version` number, which can be any integer value,
|
||||
in order to simplify template management by external systems. The `version`
|
||||
field is completely optional and it is meant solely for external management of
|
||||
templates. To unset a `version`, simply replace the template without specifying
|
||||
one.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /_template/template_1
|
||||
{
|
||||
"template" : "*",
|
||||
"order" : 0,
|
||||
"settings" : {
|
||||
"number_of_shards" : 1
|
||||
},
|
||||
"version": 123
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
To check for the `version`, you can
|
||||
<<common-options-response-filtering, filter responses>>
|
||||
using `filter_path` to limit the response to just the `version`:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_template/template_1?filter_path=*.version
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
This should give a small response that makes it both easy and inexpensive to parse:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"template_1" : {
|
||||
"version" : 123
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
|
|
|
@ -90,6 +90,56 @@ For each returned pipeline, the source and the version are returned.
|
|||
The version is useful for knowing which version of the pipeline the node has.
|
||||
You can specify multiple IDs to return more than one pipeline. Wildcards are also supported.
|
||||
|
||||
[float]
|
||||
[[versioning-pipelines]]
|
||||
==== Pipeline Versioning
|
||||
|
||||
Pipelines can optionally add a `version` number, which can be any integer value,
|
||||
in order to simplify pipeline management by external systems. The `version`
|
||||
field is completely optional and it is meant solely for external management of
|
||||
pipelines. To unset a `version`, simply replace the pipeline without specifying
|
||||
one.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _ingest/pipeline/my-pipeline-id
|
||||
{
|
||||
"description" : "describe pipeline",
|
||||
"version" : 123,
|
||||
"processors" : [
|
||||
{
|
||||
"set" : {
|
||||
"field": "foo",
|
||||
"value": "bar"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
To check for the `version`, you can
|
||||
<<common-options-response-filtering, filter responses>>
|
||||
using `filter_path` to limit the response to just the `version`:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_ingest/pipeline/my-pipeline-id?filter_path=*.version
|
||||
--------------------------------------------------
|
||||
// TEST[continued]
|
||||
|
||||
This should give a small response that makes it both easy and inexpensive to parse:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"my-pipeline-id" : {
|
||||
"version" : 123
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[delete-pipeline-api]]
|
||||
=== Delete Pipeline API
|
||||
|
||||
|
@ -1382,14 +1432,16 @@ caching see <<modules-scripting-using-caching, Script Caching>>.
|
|||
.Script Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `lang` | no | - | The scripting language
|
||||
| `file` | no | - | The script file to refer to
|
||||
| `id` | no | - | The stored script id to refer to
|
||||
| `inline` | no | - | An inline script to be executed
|
||||
| `params` | no | - | Script Parameters
|
||||
| Name | Required | Default | Description
|
||||
| `lang` | no | "painless" | The scripting language
|
||||
| `file` | no | - | The script file to refer to
|
||||
| `id` | no | - | The stored script id to refer to
|
||||
| `inline` | no | - | An inline script to be executed
|
||||
| `params` | no | - | Script Parameters
|
||||
|======
|
||||
|
||||
One of `file`, `id`, `inline` options must be provided in order to properly reference a script to execute.
|
||||
|
||||
You can access the current ingest document from within the script context by using the `ctx` variable.
|
||||
|
||||
The following example sets a new field called `field_a_plus_b_times_c` to be the sum of two existing
|
||||
|
@ -1639,4 +1691,4 @@ pipeline should be used:
|
|||
--------------------------------------------------
|
||||
|
||||
The reason for this is that Ingest doesn't know how to automatically cast
|
||||
a scalar field to an object field.
|
||||
a scalar field to an object field.
|
||||
|
|
|
@ -6,6 +6,31 @@ allocation of shards to nodes, cluster-level shard allocation filtering allows
|
|||
you to allow or disallow the allocation of shards from *any* index to
|
||||
particular nodes.
|
||||
|
||||
The available _dynamic_ cluster settings are as follows, where `{attribute}`
|
||||
refers to an arbitrary node attribute:
|
||||
|
||||
`cluster.routing.allocation.include.{attribute}`::
|
||||
|
||||
Allocate shards to a node whose `{attribute}` has at least one of the
|
||||
comma-separated values.
|
||||
|
||||
`cluster.routing.allocation.require.{attribute}`::
|
||||
|
||||
Only allocate shards to a node whose `{attribute}` has _all_ of the
|
||||
comma-separated values.
|
||||
|
||||
`cluster.routing.allocation.exclude.{attribute}`::
|
||||
|
||||
Do not allocate shards to a node whose `{attribute}` has _none_ of the
|
||||
comma-separated values.
|
||||
|
||||
These special attributes are also supported:
|
||||
|
||||
[horizontal]
|
||||
`_name`:: Match nodes by node names
|
||||
`_ip`:: Match nodes by IP addresses (the IP address associated with the hostname)
|
||||
`_host`:: Match nodes by hostnames
|
||||
|
||||
The typical use case for cluster-wide shard allocation filtering is when you
|
||||
want to decommission a node, and you would like to move the shards from that
|
||||
node to other nodes in the cluster before shutting it down.
|
||||
|
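As a concrete illustration of the decommissioning use case (a sketch, not part of the page): excluding a node by IP through a transient cluster settings update from the Java admin client.

[source,java]
--------------------------------------------------
// Drain shards away from 10.0.0.1 before taking the node down.
client.admin().cluster().prepareUpdateSettings()
    .setTransientSettings(Settings.builder()
        .put("cluster.routing.allocation.exclude._ip", "10.0.0.1"))
    .get();
--------------------------------------------------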
@ -27,35 +52,8 @@ NOTE: Shards will only be relocated if it is possible to do so without
|
|||
breaking another routing constraint, such as never allocating a primary and
|
||||
replica shard to the same node.
|
||||
|
||||
Cluster-wide shard allocation filtering works in the same way as index-level
|
||||
shard allocation filtering (see <<index-modules-allocation>> for details).
|
||||
|
||||
The available _dynamic_ cluster settings are as follows, where `{attribute}`
|
||||
refers to an arbitrary node attribute:
|
||||
|
||||
`cluster.routing.allocation.include.{attribute}`::
|
||||
|
||||
Assign the index to a node whose `{attribute}` has at least one of the
|
||||
comma-separated values.
|
||||
|
||||
`cluster.routing.allocation.require.{attribute}`::
|
||||
|
||||
Assign the index to a node whose `{attribute}` has _all_ of the
|
||||
comma-separated values.
|
||||
|
||||
`cluster.routing.allocation.exclude.{attribute}`::
|
||||
|
||||
Assign the index to a node whose `{attribute}` has _none_ of the
|
||||
comma-separated values.
|
||||
|
||||
These special attributes are also supported:
|
||||
|
||||
[horizontal]
|
||||
`_name`:: Match nodes by node name
|
||||
`_ip`:: Match nodes by IP address (the IP address associated with the hostname)
|
||||
`_host`:: Match nodes by hostname
|
||||
|
||||
All attribute values can be specified with wildcards, eg:
|
||||
In addition to listing multiple values as a comma-separated list, all
|
||||
attribute values can be specified with wildcards, eg:
|
||||
|
||||
[source,js]
|
||||
------------------------
|
||||
|
|
|
@ -203,7 +203,7 @@ For string `"abcd"`:
===== Optional operators

These operators are available by default as the `flags` parameter defaults to `ALL`.
Different flag combinations (concatened with `"\"`) can be used to enable/disable
Different flag combinations (concatenated with `"|"`) can be used to enable/disable
specific operators:

{
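A hedged sketch of driving the same `flags` parameter from the Java API, assuming the `RegexpFlag` enum mirrors the REST flag names:

[source,java]
--------------------------------------------------
// Enable only the intersection, complement and empty-language operators for this query.
QueryBuilder query = QueryBuilders.regexpQuery("name.first", "a.*")
    .flags(RegexpFlag.INTERSECTION, RegexpFlag.COMPLEMENT, RegexpFlag.EMPTY);
--------------------------------------------------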
@ -15,14 +15,14 @@ GET /_search
    },
    "highlight" : {
        "fields" : {
            "content" : {}
            "comment" : {}
        }
    }
}
--------------------------------------------------
// CONSOLE

In the above case, the `content` field will be highlighted for each
In the above case, the `comment` field will be highlighted for each
search hit (there will be another element in each search hit, called
`highlight`, which includes the highlighted fields and the highlighted
fragments).
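For completeness, a minimal Java-API sketch of the same highlight request (assumes the 5.x `HighlightBuilder`; the field and query values match the renamed example above):

[source,java]
--------------------------------------------------
// Search on the "comment" field and ask for highlighted fragments of it.
SearchResponse response = client.prepareSearch()
    .setQuery(QueryBuilders.matchQuery("comment", "foo bar"))
    .highlighter(new HighlightBuilder().field("comment"))
    .get();
--------------------------------------------------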
@ -71,14 +71,14 @@ natural languages, not as well with fields containing for instance html markup
|
|||
* Treats the document as the whole corpus, and scores individual sentences as
|
||||
if they were documents in this corpus, using the BM25 algorithm
|
||||
|
||||
Here is an example of setting the `content` field in the index mapping to allow for
|
||||
Here is an example of setting the `comment` field in the index mapping to allow for
|
||||
highlighting using the postings highlighter on it:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"type_name" : {
|
||||
"content" : {"index_options" : "offsets"}
|
||||
"comment" : {"index_options" : "offsets"}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
@ -113,7 +113,7 @@ will be used instead of the plain highlighter. The fast vector highlighter:
|
|||
for things like phrase matches being sorted above term matches when
|
||||
highlighting a Boosting Query that boosts phrase matches over term matches
|
||||
|
||||
Here is an example of setting the `content` field to allow for
|
||||
Here is an example of setting the `comment` field to allow for
|
||||
highlighting using the fast vector highlighter on it (this will cause
|
||||
the index to be bigger):
|
||||
|
||||
|
@ -121,7 +121,7 @@ the index to be bigger):
|
|||
--------------------------------------------------
|
||||
{
|
||||
"type_name" : {
|
||||
"content" : {"term_vector" : "with_positions_offsets"}
|
||||
"comment" : {"term_vector" : "with_positions_offsets"}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
@ -142,7 +142,7 @@ GET /_search
|
|||
},
|
||||
"highlight" : {
|
||||
"fields" : {
|
||||
"content" : {"type" : "plain"}
|
||||
"comment" : {"type" : "plain"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -163,7 +163,7 @@ GET /_search
|
|||
},
|
||||
"highlight" : {
|
||||
"fields" : {
|
||||
"content" : {"force_source" : true}
|
||||
"comment" : {"force_source" : true}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -241,7 +241,7 @@ GET /_search
|
|||
"highlight" : {
|
||||
"tags_schema" : "styled",
|
||||
"fields" : {
|
||||
"content" : {}
|
||||
"comment" : {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -271,7 +271,7 @@ GET /_search
|
|||
},
|
||||
"highlight" : {
|
||||
"fields" : {
|
||||
"content" : {"fragment_size" : 150, "number_of_fragments" : 3}
|
||||
"comment" : {"fragment_size" : 150, "number_of_fragments" : 3}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -294,7 +294,7 @@ GET /_search
|
|||
"highlight" : {
|
||||
"order" : "score",
|
||||
"fields" : {
|
||||
"content" : {"fragment_size" : 150, "number_of_fragments" : 3}
|
||||
"comment" : {"fragment_size" : 150, "number_of_fragments" : 3}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -317,7 +317,7 @@ GET /_search
|
|||
"highlight" : {
|
||||
"fields" : {
|
||||
"_all" : {},
|
||||
"bio.title" : {"number_of_fragments" : 0}
|
||||
"blog.title" : {"number_of_fragments" : 0}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -345,7 +345,7 @@ GET /_search
},
"highlight" : {
"fields" : {
"content" : {
"comment" : {
"fragment_size" : 150,
"number_of_fragments" : 3,
"no_match_size": 150

@@ -375,7 +375,7 @@ GET /_search
"stored_fields": [ "_id" ],
"query" : {
"match": {
"content": {
"comment": {
"query": "foo bar"
}
}

@@ -385,7 +385,7 @@ GET /_search
"query": {
"rescore_query" : {
"match_phrase": {
"content": {
"comment": {
"query": "foo bar",
"slop": 1
}

@@ -397,21 +397,21 @@ GET /_search
"highlight" : {
"order" : "score",
"fields" : {
"content" : {
"comment" : {
"fragment_size" : 150,
"number_of_fragments" : 3,
"highlight_query": {
"bool": {
"must": {
"match": {
"content": {
"comment": {
"query": "foo bar"
}
}
},
"should": {
"match_phrase": {
"content": {
"comment": {
"query": "foo bar",
"slop": 1,
"boost": 10.0

@@ -452,9 +452,9 @@ GET /_search
"fragment_size" : 150,
"fields" : {
"_all" : { "pre_tags" : ["<em>"], "post_tags" : ["</em>"] },
"bio.title" : { "number_of_fragments" : 0 },
"bio.author" : { "number_of_fragments" : 0 },
"bio.content" : { "number_of_fragments" : 5, "order" : "score" }
"blog.title" : { "number_of_fragments" : 0 },
"blog.author" : { "number_of_fragments" : 0 },
"blog.comment" : { "number_of_fragments" : 5, "order" : "score" }
}
}
}

@@ -508,8 +508,8 @@ ways. All `matched_fields` must have `term_vector` set to
combined is loaded so only that field would benefit from having
`store` set to `yes`.

In the following examples `content` is analyzed by the `english`
analyzer and `content.plain` is analyzed by the `standard` analyzer.
In the following examples `comment` is analyzed by the `english`
analyzer and `comment.plain` is analyzed by the `standard` analyzer.

[source,js]
--------------------------------------------------
@@ -517,15 +517,15 @@ GET /_search
{
"query": {
"query_string": {
"query": "content.plain:running scissors",
"fields": ["content"]
"query": "comment.plain:running scissors",
"fields": ["comment"]
}
},
"highlight": {
"order": "score",
"fields": {
"content": {
"matched_fields": ["content", "content.plain"],
"comment": {
"matched_fields": ["comment", "comment.plain"],
"type" : "fvh"
}
}

@@ -546,14 +546,14 @@ GET /_search
"query": {
"query_string": {
"query": "running scissors",
"fields": ["content", "content.plain^10"]
"fields": ["comment", "comment.plain^10"]
}
},
"highlight": {
"order": "score",
"fields": {
"content": {
"matched_fields": ["content", "content.plain"],
"comment": {
"matched_fields": ["comment", "comment.plain"],
"type" : "fvh"
}
}

@@ -572,14 +572,14 @@ GET /_search
"query": {
"query_string": {
"query": "running scissors",
"fields": ["content", "content.plain^10"]
"fields": ["comment", "comment.plain^10"]
}
},
"highlight": {
"order": "score",
"fields": {
"content": {
"matched_fields": ["content.plain"],
"comment": {
"matched_fields": ["comment.plain"],
"type" : "fvh"
}
}

@@ -590,7 +590,7 @@ GET /_search

The above query wouldn't highlight "run" or "scissor" but shows that
it is just fine not to list the field to which the matches are combined
(`content`) in the matched fields.
(`comment`) in the matched fields.

[NOTE]
Technically it is also fine to add fields to `matched_fields` that

@@ -606,7 +606,7 @@ There is a small amount of overhead involved with setting
--------------------------------------------------
"highlight": {
"fields": {
"content": {}
"comment": {}
}
}
--------------------------------------------------

@@ -615,8 +615,8 @@ to
--------------------------------------------------
"highlight": {
"fields": {
"content": {
"matched_fields": ["content"],
"comment": {
"matched_fields": ["comment"],
"type" : "fvh"
}
}
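The `matched_fields`/`fvh` combination above can also be expressed through the Java API; a minimal sketch (not part of this diff), assuming the 5.x `HighlightBuilder` and the field names used in the examples:

[source,java]
--------------------------------------------------
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

// Sketch: highlight `comment`, combining matches from `comment` and `comment.plain`,
// with the fast vector highlighter, mirroring the JSON examples above.
SearchSourceBuilder source = new SearchSourceBuilder()
        .highlighter(new HighlightBuilder()
                .order("score")
                .field(new HighlightBuilder.Field("comment")
                        .matchedFields("comment", "comment.plain")
                        .highlighterType("fvh")));
--------------------------------------------------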
@@ -25,6 +25,11 @@ subprojects {
// for local ES plugins, the name of the plugin is the same as the directory
name project.name
}

run {
// these cannot be run with the normal distribution, since they are included in it!
distribution = 'integ-test-zip'
}

if (project.file('src/main/packaging').exists()) {
throw new InvalidModelException("Modules cannot contain packaging files")
@@ -69,6 +69,10 @@ public final class ScriptProcessor extends AbstractProcessor {
return TYPE;
}

Script getScript() {
return script;
}

public static final class Factory implements Processor.Factory {

private final ScriptService scriptService;

@@ -80,7 +84,7 @@ public final class ScriptProcessor extends AbstractProcessor {
@Override
public ScriptProcessor create(Map<String, Processor.Factory> registry, String processorTag,
Map<String, Object> config) throws Exception {
String lang = readStringProperty(TYPE, processorTag, config, "lang");
String lang = readOptionalStringProperty(TYPE, processorTag, config, "lang");
String inline = readOptionalStringProperty(TYPE, processorTag, config, "inline");
String file = readOptionalStringProperty(TYPE, processorTag, config, "file");
String id = readOptionalStringProperty(TYPE, processorTag, config, "id");
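The switch to `readOptionalStringProperty` means `lang` may now be missing from the processor config and the factory falls back to the default script language. A standalone sketch of that read-with-default pattern (plain Java for illustration, not the actual `ConfigurationUtils` helper):

[source,java]
--------------------------------------------------
import java.util.HashMap;
import java.util.Map;

// Sketch of the "optional property with default" pattern used by the factory above.
final class OptionalLangExample {
    static String readLangOrDefault(Map<String, Object> config) {
        Object lang = config.remove("lang");                 // optional, may be absent
        return lang == null ? "painless" : lang.toString();  // default mirrors Script.DEFAULT_SCRIPT_LANG
    }

    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put("inline", "ctx.bytes_total = ctx.bytes_in + ctx.bytes_out");
        System.out.println(readLangOrDefault(config));       // prints "painless"
    }
}
--------------------------------------------------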
@@ -20,7 +20,7 @@
package org.elasticsearch.ingest.common;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

@@ -29,18 +29,48 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;

public class ScriptProcessorFactoryTests extends ESTestCase {

private ScriptProcessor.Factory factory;
private static final Map<String, String> ingestScriptParamToType;
static {
Map<String, String> map = new HashMap<>();
map.put("id", "stored");
map.put("inline", "inline");
map.put("file", "file");
ingestScriptParamToType = Collections.unmodifiableMap(map);
}

@Before
public void init() {
factory = new ScriptProcessor.Factory(mock(ScriptService.class));
}

public void testFactoryValidationWithDefaultLang() throws Exception {
Map<String, Object> configMap = new HashMap<>();
String randomType = randomFrom("id", "inline", "file");
configMap.put(randomType, "foo");
ScriptProcessor processor = factory.create(null, randomAsciiOfLength(10), configMap);
assertThat(processor.getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
assertThat(processor.getScript().getType().toString(), equalTo(ingestScriptParamToType.get(randomType)));
assertThat(processor.getScript().getParams(), equalTo(Collections.emptyMap()));
}

public void testFactoryValidationWithParams() throws Exception {
Map<String, Object> configMap = new HashMap<>();
String randomType = randomFrom("id", "inline", "file");
Map<String, Object> randomParams = Collections.singletonMap(randomAsciiOfLength(10), randomAsciiOfLength(10));
configMap.put(randomType, "foo");
configMap.put("params", randomParams);
ScriptProcessor processor = factory.create(null, randomAsciiOfLength(10), configMap);
assertThat(processor.getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG));
assertThat(processor.getScript().getType().toString(), equalTo(ingestScriptParamToType.get(randomType)));
assertThat(processor.getScript().getParams(), equalTo(randomParams));
}

public void testFactoryValidationForMultipleScriptingTypes() throws Exception {
Map<String, Object> configMap = new HashMap<>();
@@ -101,6 +101,12 @@ class org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints -> org.elastic
double geohashDistanceWithDefault(String,double)
}

class org.elasticsearch.index.fielddata.ScriptDocValues.Booleans -> org.elasticsearch.index.fielddata.ScriptDocValues$Booleans extends List,Collection,Iterable,Object {
Boolean get(int)
boolean getValue()
List getValues()
}

# for testing.
# currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods
class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.FeatureTest extends Object {
@@ -6,19 +6,19 @@
index: test
type: test
id: 1
body: { "test": "value beck", "num1": 1.0 }
body: { "test": "value beck", "num1": 1.0, "bool": true }
- do:
index:
index: test
type: test
id: 2
body: { "test": "value beck", "num1": 2.0 }
body: { "test": "value beck", "num1": 2.0, "bool": false }
- do:
index:
index: test
type: test
id: 3
body: { "test": "value beck", "num1": 3.0 }
body: { "test": "value beck", "num1": 3.0, "bool": true }
- do:
indices.refresh: {}

@@ -95,6 +95,19 @@
- match: { hits.hits.1.fields.sNum1.0: 2.0 }
- match: { hits.hits.2.fields.sNum1.0: 3.0 }

- do:
index: test
search:
body:
query:
script:
script:
inline: "doc['bool'].value == false"
lang: painless

- match: { hits.total: 1 }
- match: { hits.hits.0._id: "2" }

---

"Custom Script Boost":
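For reference, the painless boolean doc-values access exercised by this YAML test looks roughly like the following from the Java API (a sketch, not part of this diff; the single-argument `Script` constructor defaults to an inline script in the default language, which is painless in 5.x):

[source,java]
--------------------------------------------------
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.script.Script;

// Sketch: the same script query as the YAML test above, matching documents
// whose new "bool" field is false.
QueryBuilder query = QueryBuilders.scriptQuery(new Script("doc['bool'].value == false"));
--------------------------------------------------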
@@ -26,13 +26,13 @@ esplugin {
integTest {
cluster {
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', 'myself'
setting 'reindex.remote.whitelist', '127.0.0.1:*'
}
}

run {
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', 'myself'
setting 'reindex.remote.whitelist', '127.0.0.1:*'
}

dependencies {
|
|||
import org.apache.http.impl.nio.reactor.IOReactorConfig;
|
||||
import org.apache.http.message.BasicHeader;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.util.automaton.Automata;
|
||||
import org.apache.lucene.util.automaton.Automaton;
|
||||
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
|
||||
import org.apache.lucene.util.automaton.MinimizationOperations;
|
||||
import org.apache.lucene.util.automaton.Operations;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.bulk.BackoffPolicy;
|
||||
|
@ -44,8 +49,10 @@ import org.elasticsearch.cluster.ClusterState;
|
|||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -65,11 +72,9 @@ import org.elasticsearch.threadpool.ThreadPool;
|
|||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Function;
|
||||
|
@ -87,7 +92,7 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
|
|||
private final ScriptService scriptService;
|
||||
private final AutoCreateIndex autoCreateIndex;
|
||||
private final Client client;
|
||||
private final Set<String> remoteWhitelist;
|
||||
private final CharacterRunAutomaton remoteWhitelist;
|
||||
private final HttpServer httpServer;
|
||||
|
||||
@Inject
|
||||
|
@ -100,7 +105,7 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
|
|||
this.scriptService = scriptService;
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
this.client = client;
|
||||
remoteWhitelist = new HashSet<>(REMOTE_CLUSTER_WHITELIST.get(settings));
|
||||
remoteWhitelist = buildRemoteWhitelist(REMOTE_CLUSTER_WHITELIST.get(settings));
|
||||
this.httpServer = httpServer;
|
||||
}
|
||||
|
||||
|
@ -128,22 +133,35 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
|
|||
checkRemoteWhitelist(remoteWhitelist, remoteInfo, publishAddress);
|
||||
}
|
||||
|
||||
static void checkRemoteWhitelist(Set<String> whitelist, RemoteInfo remoteInfo, TransportAddress publishAddress) {
|
||||
if (remoteInfo == null) return;
|
||||
static void checkRemoteWhitelist(CharacterRunAutomaton whitelist, RemoteInfo remoteInfo, TransportAddress publishAddress) {
|
||||
if (remoteInfo == null) {
|
||||
return;
|
||||
}
|
||||
String check = remoteInfo.getHost() + ':' + remoteInfo.getPort();
|
||||
if (whitelist.contains(check)) return;
|
||||
/*
|
||||
* For testing we support the key "myself" to allow connecting to the local node. We can't just change the setting to include the
|
||||
* local node because it is intentionally not a dynamic setting for security purposes. We can't use something like "localhost:9200"
|
||||
* because we don't know up front which port we'll get because the tests bind to port 0. Instead we try to resolve it here, taking
|
||||
* "myself" to mean "my published http address".
|
||||
*/
|
||||
if (whitelist.contains("myself") && publishAddress != null && publishAddress.toString().equals(check)) {
|
||||
if (whitelist.run(check)) {
|
||||
return;
|
||||
}
|
||||
throw new IllegalArgumentException('[' + check + "] not whitelisted in " + REMOTE_CLUSTER_WHITELIST.getKey());
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the {@link CharacterRunAutomaton} that represents the reindex-from-remote whitelist and make sure that it doesn't whitelist
|
||||
* the world.
|
||||
*/
|
||||
static CharacterRunAutomaton buildRemoteWhitelist(List<String> whitelist) {
|
||||
if (whitelist.isEmpty()) {
|
||||
return new CharacterRunAutomaton(Automata.makeEmpty());
|
||||
}
|
||||
Automaton automaton = Regex.simpleMatchToAutomaton(whitelist.toArray(Strings.EMPTY_ARRAY));
|
||||
automaton = MinimizationOperations.minimize(automaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
|
||||
if (Operations.isTotal(automaton)) {
|
||||
throw new IllegalArgumentException("Refusing to start because whitelist " + whitelist + " accepts all addresses. "
|
||||
+ "This would allow users to reindex-from-remote any URL they like effectively having Elasticsearch make HTTP GETs "
|
||||
+ "for them.");
|
||||
}
|
||||
return new CharacterRunAutomaton(automaton);
|
||||
}
|
||||
|
||||
/**
|
||||
* Throws an ActionRequestValidationException if the request tries to index
|
||||
* back into the same index or into an index that points to two indexes.
|
||||
|
|
|
@ -27,11 +27,14 @@ import org.junit.Before;
|
|||
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.emptySet;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static org.elasticsearch.index.reindex.TransportReindexAction.buildRemoteWhitelist;
|
||||
import static org.elasticsearch.index.reindex.TransportReindexAction.checkRemoteWhitelist;
|
||||
|
||||
/**
|
||||
|
@ -46,45 +49,89 @@ public class ReindexFromRemoteWhitelistTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testLocalRequestWithoutWhitelist() {
|
||||
checkRemoteWhitelist(emptySet(), null, localhostOrNone());
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(emptyList()), null, localhostOrNone());
|
||||
}
|
||||
|
||||
public void testLocalRequestWithWhitelist() {
|
||||
checkRemoteWhitelist(randomWhitelist(), null, localhostOrNone());
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(randomWhitelist()), null, localhostOrNone());
|
||||
}
|
||||
|
||||
public void testWhitelistedRemote() {
|
||||
Set<String> whitelist = randomWhitelist();
|
||||
List<String> whitelist = randomWhitelist();
|
||||
String[] inList = whitelist.iterator().next().split(":");
|
||||
String host = inList[0];
|
||||
int port = Integer.valueOf(inList[1]);
|
||||
checkRemoteWhitelist(whitelist, new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null, emptyMap()),
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(whitelist),
|
||||
new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null, emptyMap()),
|
||||
localhostOrNone());
|
||||
}
|
||||
|
||||
public void testMyselfInWhitelistRemote() throws UnknownHostException {
|
||||
Set<String> whitelist = randomWhitelist();
|
||||
whitelist.add("myself");
|
||||
public void testWhitelistedByPrefix() {
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")),
|
||||
new RemoteInfo(randomAsciiOfLength(5), "es.example.com", 9200, new BytesArray("test"), null, null, emptyMap()),
|
||||
localhostOrNone());
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")),
|
||||
new RemoteInfo(randomAsciiOfLength(5), "6e134134a1.us-east-1.aws.example.com", 9200,
|
||||
new BytesArray("test"), null, null, emptyMap()),
|
||||
localhostOrNone());
|
||||
}
|
||||
|
||||
public void testWhitelistedBySuffix() {
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(singletonList("es.example.com:*")),
|
||||
new RemoteInfo(randomAsciiOfLength(5), "es.example.com", 9200, new BytesArray("test"), null, null, emptyMap()),
|
||||
localhostOrNone());
|
||||
}
|
||||
|
||||
public void testWhitelistedByInfix() {
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(singletonList("es*.example.com:9200")),
|
||||
new RemoteInfo(randomAsciiOfLength(5), "es1.example.com", 9200, new BytesArray("test"), null, null, emptyMap()),
|
||||
localhostOrNone());
|
||||
}
|
||||
|
||||
|
||||
public void testLoopbackInWhitelistRemote() throws UnknownHostException {
|
||||
List<String> whitelist = randomWhitelist();
|
||||
whitelist.add("127.0.0.1:*");
|
||||
TransportAddress publishAddress = new TransportAddress(InetAddress.getByAddress(new byte[] {0x7f,0x00,0x00,0x01}), 9200);
|
||||
checkRemoteWhitelist(whitelist,
|
||||
new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null, emptyMap()), publishAddress);
|
||||
checkRemoteWhitelist(buildRemoteWhitelist(whitelist),
|
||||
new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null, emptyMap()),
|
||||
publishAddress);
|
||||
}
|
||||
|
||||
public void testUnwhitelistedRemote() {
|
||||
int port = between(1, Integer.MAX_VALUE);
|
||||
RemoteInfo remoteInfo = new RemoteInfo(randomAsciiOfLength(5), "not in list", port, new BytesArray("test"), null, null, emptyMap());
|
||||
List<String> whitelist = randomBoolean() ? randomWhitelist() : emptyList();
|
||||
Exception e = expectThrows(IllegalArgumentException.class,
|
||||
() -> checkRemoteWhitelist(randomWhitelist(), remoteInfo, localhostOrNone()));
|
||||
() -> checkRemoteWhitelist(buildRemoteWhitelist(whitelist), remoteInfo, localhostOrNone()));
|
||||
assertEquals("[not in list:" + port + "] not whitelisted in reindex.remote.whitelist", e.getMessage());
|
||||
}
|
||||
|
||||
private Set<String> randomWhitelist() {
|
||||
public void testRejectMatchAll() {
|
||||
assertMatchesTooMuch(singletonList("*"));
|
||||
assertMatchesTooMuch(singletonList("**"));
|
||||
assertMatchesTooMuch(singletonList("***"));
|
||||
assertMatchesTooMuch(Arrays.asList("realstuff", "*"));
|
||||
assertMatchesTooMuch(Arrays.asList("*", "realstuff"));
|
||||
List<String> random = randomWhitelist();
|
||||
random.add("*");
|
||||
assertMatchesTooMuch(random);
|
||||
}
|
||||
|
||||
private void assertMatchesTooMuch(List<String> whitelist) {
|
||||
Exception e = expectThrows(IllegalArgumentException.class, () -> buildRemoteWhitelist(whitelist));
|
||||
assertEquals("Refusing to start because whitelist " + whitelist + " accepts all addresses. "
|
||||
+ "This would allow users to reindex-from-remote any URL they like effectively having Elasticsearch make HTTP GETs "
|
||||
+ "for them.", e.getMessage());
|
||||
}
|
||||
|
||||
private List<String> randomWhitelist() {
|
||||
int size = between(1, 100);
|
||||
Set<String> set = new HashSet<>(size);
|
||||
while (set.size() < size) {
|
||||
set.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE));
|
||||
List<String> whitelist = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
whitelist.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE));
|
||||
}
|
||||
return set;
|
||||
return whitelist;
|
||||
}
|
||||
|
||||
private TransportAddress localhostOrNone() {
|
||||
|
|
|
@ -72,7 +72,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase {
|
|||
// Weird incantation required to test with netty
|
||||
settings.put(NetworkModule.HTTP_ENABLED.getKey(), true);
|
||||
// Whitelist reindexing from the http host we're going to use
|
||||
settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself");
|
||||
settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*");
|
||||
settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME);
|
||||
return settings.build();
|
||||
}
|
||||
|
|
|
@ -109,7 +109,7 @@ public class RetryTests extends ESSingleNodeTestCase {
|
|||
// Enable http so we can test retries on reindex from remote. In this case the "remote" cluster is just this cluster.
|
||||
settings.put(NetworkModule.HTTP_ENABLED.getKey(), true);
|
||||
// Whitelist reindexing from the http host we're going to use
|
||||
settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself");
|
||||
settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*");
|
||||
if (useNetty3) {
|
||||
settings.put(NetworkModule.HTTP_TYPE_KEY, Netty3Plugin.NETTY_HTTP_TRANSPORT_NAME);
|
||||
settings.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME);
|
||||
|
|
|
@ -35,7 +35,6 @@ import org.elasticsearch.cluster.ClusterState;
|
|||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.repositories.RepositoryMissingException;
|
||||
import org.elasticsearch.repositories.RepositoryVerificationException;
|
||||
import org.elasticsearch.repositories.azure.AzureRepository.Repository;
|
||||
|
@ -75,14 +74,6 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyIntegT
|
|||
return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
|
||||
// In snapshot tests, we explicitly disable cloud discovery
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
|
||||
.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Settings indexSettings() {
|
||||
// During restore we frequently restore index to exactly the same state it was before, that might cause the same
|
||||
|
|
|
@ -26,22 +26,23 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.network.NetworkModule;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.node.MockNode;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.node.NodeValidationException;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
import org.elasticsearch.test.discovery.MockZenPing;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collections;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.either;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
|
@ -63,25 +64,25 @@ public class TribeUnitTests extends ESTestCase {
|
|||
Settings baseSettings = Settings.builder()
|
||||
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
|
||||
.put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
|
||||
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
|
||||
.build();
|
||||
|
||||
final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class);
|
||||
tribe1 = new TribeClientNode(
|
||||
Settings.builder()
|
||||
.put(baseSettings)
|
||||
.put("cluster.name", "tribe1")
|
||||
.put("node.name", "tribe1_node")
|
||||
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
|
||||
.build(), Collections.singleton(MockTcpTransportPlugin.class)).start();
|
||||
.build(), mockPlugins).start();
|
||||
tribe2 = new TribeClientNode(
|
||||
Settings.builder()
|
||||
.put(baseSettings)
|
||||
.put("cluster.name", "tribe2")
|
||||
.put("node.name", "tribe2_node")
|
||||
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
|
||||
.build(), Collections.singleton(MockTcpTransportPlugin.class)).start();
|
||||
.build(), mockPlugins).start();
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
@ -106,11 +107,10 @@ public class TribeUnitTests extends ESTestCase {
|
|||
.put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).put("discovery.type", "local")
|
||||
.put("tribe.t1.transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
|
||||
.put("tribe.t2.transport.type",MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
|
||||
.put("tribe.t1.discovery.type", "local").put("tribe.t2.discovery.type", "local")
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
|
||||
.put(extraSettings).build();
|
||||
|
||||
try (Node node = new MockNode(settings, Collections.singleton(MockTcpTransportPlugin.class)).start()) {
|
||||
try (Node node = new MockNode(settings, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) {
|
||||
try (Client client = node.client()) {
|
||||
assertBusy(() -> {
|
||||
ClusterState state = client.admin().cluster().prepareState().clear().setNodes(true).get().getState();
|
||||
|
|
|
@ -9,7 +9,6 @@
|
|||
"processors": [
|
||||
{
|
||||
"script" : {
|
||||
"lang" : "painless",
|
||||
"inline": "ctx.bytes_total = (ctx.bytes_in + ctx.bytes_out) * params.factor",
|
||||
"params": {
|
||||
"factor": 10
|
||||
|
@ -48,7 +47,6 @@
|
|||
"processors": [
|
||||
{
|
||||
"script" : {
|
||||
"lang" : "painless",
|
||||
"file": "master"
|
||||
}
|
||||
}
|
||||
|
@ -94,7 +92,6 @@
|
|||
"processors": [
|
||||
{
|
||||
"script" : {
|
||||
"lang" : "painless",
|
||||
"id" : "sum_bytes"
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -37,7 +37,6 @@ List<String> availableBoxes = [
'sles-12',
'ubuntu-1204',
'ubuntu-1404',
'ubuntu-1504',
'ubuntu-1604'
]

@ -0,0 +1,75 @@
|
|||
{
|
||||
"create": {
|
||||
"documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html",
|
||||
"methods": ["PUT","POST"],
|
||||
"url": {
|
||||
"path": "/{index}/{type}/{id}/_create",
|
||||
"paths": ["/{index}/{type}/{id}/_create"],
|
||||
"parts": {
|
||||
"id": {
|
||||
"type" : "string",
|
||||
"required" : true,
|
||||
"description" : "Document ID"
|
||||
},
|
||||
"index": {
|
||||
"type" : "string",
|
||||
"required" : true,
|
||||
"description" : "The name of the index"
|
||||
},
|
||||
"type": {
|
||||
"type" : "string",
|
||||
"required" : true,
|
||||
"description" : "The type of the document"
|
||||
}
|
||||
},
|
||||
"params": {
|
||||
"wait_for_active_shards": {
|
||||
"type" : "string",
|
||||
"description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)"
|
||||
},
|
||||
"parent": {
|
||||
"type" : "string",
|
||||
"description" : "ID of the parent document"
|
||||
},
|
||||
"refresh": {
|
||||
"type" : "enum",
|
||||
"options": ["true", "false", "wait_for"],
|
||||
"description" : "If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes."
|
||||
},
|
||||
"routing": {
|
||||
"type" : "string",
|
||||
"description" : "Specific routing value"
|
||||
},
|
||||
"timeout": {
|
||||
"type" : "time",
|
||||
"description" : "Explicit operation timeout"
|
||||
},
|
||||
"timestamp": {
|
||||
"type" : "time",
|
||||
"description" : "Explicit timestamp for the document"
|
||||
},
|
||||
"ttl": {
|
||||
"type" : "time",
|
||||
"description" : "Expiration time for the document"
|
||||
},
|
||||
"version" : {
|
||||
"type" : "number",
|
||||
"description" : "Explicit version number for concurrency control"
|
||||
},
|
||||
"version_type": {
|
||||
"type" : "enum",
|
||||
"options" : ["internal", "external", "external_gte", "force"],
|
||||
"description" : "Specific version type"
|
||||
},
|
||||
"pipeline" : {
|
||||
"type" : "string",
|
||||
"description" : "The pipeline id to preprocess incoming documents with"
|
||||
}
|
||||
}
|
||||
},
|
||||
"body": {
|
||||
"description" : "The document",
|
||||
"required" : true
|
||||
}
|
||||
}
|
||||
}
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
|
@ -46,13 +47,12 @@ public class NodeTests extends ESTestCase {
|
|||
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong()))
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
|
||||
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
|
||||
.put("discovery.type", "local")
|
||||
.put("transport.type", "local")
|
||||
.put("transport.type", "mock-socket-network")
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), true);
|
||||
if (name != null) {
|
||||
settings.put(Node.NODE_NAME_SETTING.getKey(), name);
|
||||
}
|
||||
try (Node node = new MockNode(settings.build(), Collections.emptyList())) {
|
||||
try (Node node = new MockNode(settings.build(), Collections.singleton(MockTcpTransportPlugin.class))) {
|
||||
final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings();
|
||||
if (name == null) {
|
||||
assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(node.getNodeEnvironment().nodeId().substring(0, 7)));
|
||||
|
|
|
@ -29,8 +29,6 @@ import org.elasticsearch.common.io.PathUtils;
|
|||
import org.elasticsearch.common.network.NetworkModule;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.elasticsearch.test.junit.listeners.LoggingListener;
|
||||
|
@ -206,6 +204,11 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
|
|||
return finalSettings.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean addMockZenPings() {
|
||||
return false;
|
||||
}
|
||||
|
||||
protected int minExternalNodes() { return 1; }
|
||||
|
||||
protected int maxExternalNodes() {
|
||||
|
@ -243,7 +246,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
|
|||
protected Settings commonNodeSettings(int nodeOrdinal) {
|
||||
Settings.Builder builder = Settings.builder().put(requiredSettings());
|
||||
builder.put(NetworkModule.TRANSPORT_TYPE_KEY, randomBoolean() ? "netty3" : "netty4"); // run same transport / disco as external
|
||||
builder.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen");
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
|
|
@ -28,15 +28,8 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.client.RestClientBuilder;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.transport.AssertingTransportInterceptor;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
|
@ -65,6 +58,7 @@ import org.elasticsearch.client.AdminClient;
|
|||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.client.Requests;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.RestClientBuilder;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
@ -74,6 +68,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
|||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
@ -99,9 +94,10 @@ import org.elasticsearch.common.xcontent.XContentHelper;
|
|||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.zen.ZenDiscovery;
|
||||
import org.elasticsearch.discovery.zen.ElectMasterService;
|
||||
import org.elasticsearch.discovery.zen.ZenDiscovery;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexModule;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
|
@ -119,12 +115,16 @@ import org.elasticsearch.indices.store.IndicesStore;
|
|||
import org.elasticsearch.node.NodeMocksPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.MockSearchService;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.test.client.RandomizingClient;
|
||||
import org.elasticsearch.test.discovery.MockZenPing;
|
||||
import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
|
||||
import org.elasticsearch.test.store.MockFSIndexStore;
|
||||
import org.elasticsearch.test.transport.MockTransportService;
|
||||
import org.elasticsearch.transport.AssertingTransportInterceptor;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
import org.hamcrest.Matchers;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
|
@ -1750,7 +1750,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
|
|||
public Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.builder()
|
||||
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
|
||||
.put(networkSettings.build()).
|
||||
put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build();
|
||||
}
|
||||
|
@ -1787,6 +1786,10 @@ public abstract class ESIntegTestCase extends ESTestCase {
|
|||
return true;
|
||||
}
|
||||
|
||||
protected boolean addMockZenPings() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful
|
||||
* for debugging or request / response pre and post processing. It also allows to intercept all calls done by the test
|
||||
|
@ -1823,6 +1826,10 @@ public abstract class ESIntegTestCase extends ESTestCase {
|
|||
if (addMockTransportService()) {
|
||||
mocks.add(MockTcpTransportPlugin.class);
|
||||
}
|
||||
|
||||
if (addMockZenPings()) {
|
||||
mocks.add(MockZenPing.TestPlugin.class);
|
||||
}
|
||||
mocks.add(TestSeedPlugin.class);
|
||||
return Collections.unmodifiableList(mocks);
|
||||
}
|
||||
|
|
|
@ -45,6 +45,7 @@ import org.elasticsearch.node.NodeValidationException;
|
|||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.test.discovery.MockZenPing;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
import org.junit.After;
|
||||
|
@ -181,7 +182,6 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
|
|||
.put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000)
|
||||
.put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created
|
||||
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
|
||||
.put("discovery.type", "local")
|
||||
.put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), true)
|
||||
.put(nodeSettings()) // allow test cases to provide their own settings or override these
|
||||
|
@ -191,6 +191,10 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
|
|||
plugins = new ArrayList<>(plugins);
|
||||
plugins.add(MockTcpTransportPlugin.class);
|
||||
}
|
||||
if (plugins.contains(MockZenPing.TestPlugin.class) == false) {
|
||||
plugins = new ArrayList<>(plugins);
|
||||
plugins.add(MockZenPing.TestPlugin.class);
|
||||
}
|
||||
Node build = new MockNode(settings, plugins);
|
||||
try {
|
||||
build.start();
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.elasticsearch.common.network.NetworkModule;
|
|||
import org.elasticsearch.common.network.NetworkUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.CollectionUtils;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
import org.elasticsearch.test.NodeConfigurationSource;
|
||||
|
@ -39,7 +38,7 @@ import java.util.Set;
|
|||
|
||||
public class ClusterDiscoveryConfiguration extends NodeConfigurationSource {
|
||||
|
||||
static Settings DEFAULT_NODE_SETTINGS = Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build();
|
||||
static Settings DEFAULT_NODE_SETTINGS = Settings.EMPTY;
|
||||
private static final String IP_ADDR = "127.0.0.1";
|
||||
|
||||
final int numOfNodes;
|
||||
|
|
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.test.discovery;
|
||||
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
|
||||
import org.elasticsearch.discovery.zen.ping.ZenPing;
|
||||
import org.elasticsearch.plugins.DiscoveryPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* A {@link ZenPing} implementation which returns results based on an static in-memory map. This allows pinging
|
||||
* to be immediate and can be used to speed up tests.
|
||||
*/
|
||||
public final class MockZenPing extends AbstractLifecycleComponent implements ZenPing {
|
||||
|
||||
static final Map<ClusterName, Set<MockZenPing>> activeNodesPerCluster = ConcurrentCollections.newConcurrentMap();
|
||||
|
||||
private volatile PingContextProvider contextProvider;
|
||||
|
||||
@Inject
|
||||
public MockZenPing(Settings settings) {
|
||||
super(settings);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setPingContextProvider(PingContextProvider contextProvider) {
|
||||
this.contextProvider = contextProvider;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void ping(PingListener listener, TimeValue timeout) {
|
||||
logger.info("pinging using mock zen ping");
|
||||
List<PingResponse> responseList = getActiveNodesForCurrentCluster().stream()
|
||||
.filter(p -> p != this) // remove this as pings are not expected to return the local node
|
||||
.map(MockZenPing::getPingResponse)
|
||||
.collect(Collectors.toList());
|
||||
listener.onPing(responseList);
|
||||
}
|
||||
|
||||
private ClusterName getClusterName() {
|
||||
return contextProvider.clusterState().getClusterName();
|
||||
}
|
||||
|
||||
private PingResponse getPingResponse() {
|
||||
final ClusterState clusterState = contextProvider.clusterState();
|
||||
return new PingResponse(clusterState.nodes().getLocalNode(), clusterState.nodes().getMasterNode(), clusterState);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() {
|
||||
assert contextProvider != null;
|
||||
boolean added = getActiveNodesForCurrentCluster().add(this);
|
||||
assert added;
|
||||
}
|
||||
|
||||
private Set<MockZenPing> getActiveNodesForCurrentCluster() {
|
||||
return activeNodesPerCluster.computeIfAbsent(getClusterName(),
|
||||
clusterName -> ConcurrentCollections.newConcurrentSet());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStop() {
|
||||
boolean found = getActiveNodesForCurrentCluster().remove(this);
|
||||
assert found;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() {
|
||||
|
||||
}
|
||||
|
||||
public static class TestPlugin extends Plugin implements DiscoveryPlugin {
|
||||
|
||||
public void onModule(DiscoveryModule discoveryModule) {
|
||||
discoveryModule.addZenPing(MockZenPing.class);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -27,13 +27,13 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.network.NetworkModule;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
import org.elasticsearch.test.NodeConfigurationSource;
|
||||
import org.elasticsearch.transport.MockTcpTransport;
|
||||
import org.elasticsearch.test.discovery.MockZenPing;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
import org.elasticsearch.transport.TransportSettings;
|
||||
|
||||
|
@ -141,7 +141,6 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
|
||||
2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes))
|
||||
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
|
||||
.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build();
|
||||
}
|
||||
|
||||
|
@ -156,12 +155,13 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
String nodePrefix = "foobar";
|
||||
|
||||
Path baseDir = createTempDir();
|
||||
final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class);
|
||||
InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
|
||||
minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
|
||||
enableHttpPipelining, nodePrefix, Collections.singleton(MockTcpTransportPlugin.class), Function.identity());
|
||||
enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
|
||||
InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
|
||||
minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes,
|
||||
enableHttpPipelining, nodePrefix, Collections.singleton(MockTcpTransportPlugin.class), Function.identity());
|
||||
enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
|
||||
|
||||
assertClusters(cluster0, cluster1, false);
|
||||
long seed = randomLong();
|
||||
|
@ -205,7 +205,6 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
|
||||
2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)
|
||||
.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
|
||||
.build();
|
||||
}
|
||||
@Override
|
||||
|
@ -219,7 +218,8 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
Path baseDir = createTempDir();
|
||||
InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
|
||||
minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
|
||||
enableHttpPipelining, nodePrefix, Collections.singleton(MockTcpTransportPlugin.class), Function.identity());
|
||||
enableHttpPipelining, nodePrefix, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class),
|
||||
Function.identity());
|
||||
try {
|
||||
cluster.beforeTest(random(), 0.0);
|
||||
final Map<String,Path[]> shardNodePaths = new HashMap<>();
|
||||
|
@ -288,7 +288,6 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes)
|
||||
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
|
||||
.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
|
||||
.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0).build();
|
||||
}
|
||||
|
||||
|
@ -297,7 +296,7 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
return Settings.builder()
|
||||
.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build();
|
||||
}
|
||||
}, 0, randomBoolean(), "", Collections.singleton(MockTcpTransportPlugin.class), Function.identity());
|
||||
}, 0, randomBoolean(), "", Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class), Function.identity());
|
||||
cluster.beforeTest(random(), 0.0);
|
||||
try {
|
||||
Map<DiscoveryNode.Role, Set<String>> pathsPerRole = new HashMap<>();
|
||||
|