diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 40e148260f3..b117ba54810 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -347,7 +347,6 @@ These are the linux flavors the Vagrantfile currently supports: * ubuntu-1204 aka precise * ubuntu-1404 aka trusty -* ubuntu-1504 aka vivid * ubuntu-1604 aka xenial * debian-8 aka jessie, the current debian stable distribution * centos-6 diff --git a/Vagrantfile b/Vagrantfile index 8ade7c8dd30..96151724d13 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -30,13 +30,6 @@ Vagrant.configure(2) do |config| config.vm.box = "elastic/ubuntu-14.04-x86_64" ubuntu_common config end - config.vm.define "ubuntu-1504" do |config| - config.vm.box = "elastic/ubuntu-15.04-x86_64" - ubuntu_common config, extra: <<-SHELL - # Install Jayatana so we can work around it being present. - [ -f /usr/share/java/jayatanaag.jar ] || install jayatana - SHELL - end config.vm.define "ubuntu-1604" do |config| config.vm.box = "elastic/ubuntu-16.04-x86_64" ubuntu_common config, extra: <<-SHELL diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index f045c95740b..a71dc59dbf9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -16,6 +16,7 @@ public class RunTask extends DefaultTask { clusterConfig.httpPort = 9200 clusterConfig.transportPort = 9300 clusterConfig.daemonize = false + clusterConfig.distribution = 'zip' project.afterEvaluate { ClusterFormationTasks.setup(project, this, clusterConfig) } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index e592b5092b7..7fc0a4d09b0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -52,7 +52,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.local.LocalDiscovery; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import java.io.IOException; @@ -72,8 +71,7 @@ import java.util.Set; * single thread and controlled by the {@link ClusterService}. After every update the * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on - * the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish} - * method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The + * the type of discovery. In Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The * publishing mechanism can be overridden by other discovery implementations. *

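* As an illustrative sketch of the diff-based publishing described below (not the exact publishing code;
* it assumes the receiving node already holds {@code previousState}):
* <pre>{@code
* Diff<ClusterState> diff = newState.diff(previousState); // computed on the master
* ClusterState received = diff.apply(previousState);      // re-assembled on a receiving node
* }</pre>
*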
* The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 198d37971c6..377409dd86b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -579,6 +579,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { private Set<String> innerResolve(Context context, List<String> expressions, IndicesOptions options, MetaData metaData) { Set<String> result = null; + boolean wildcardSeen = false; for (int i = 0; i < expressions.size(); i++) { String expression = expressions.get(i); if (aliasOrIndexExists(metaData, expression)) { @@ -598,13 +599,14 @@ } expression = expression.substring(1); } else if (expression.charAt(0) == '-') { - // if its the first, fill it with all the indices... - if (i == 0) { - List<String> concreteIndices = resolveEmptyOrTrivialWildcard(options, metaData, false); - result = new HashSet<>(concreteIndices); + // if a wildcard was seen earlier in the list, treat this negation as an exclusion and strip the leading '-', + // otherwise add the expression verbatim (e.g. an index actually named "-foo") + if (wildcardSeen) { + add = false; + expression = expression.substring(1); + } else { + add = true; } - add = false; - expression = expression.substring(1); } if (result == null) { // add all the previous ones... @@ -634,6 +636,10 @@ if (!noIndicesAllowedOrMatches(options, matches)) { throw infe(expression); } + + if (Regex.isSimpleMatchPattern(expression)) { + wildcardSeen = true; + } } return result; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index 395d3472329..0e7159c857b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -54,7 +54,8 @@ public class MaxRetryAllocationDecider extends AllocationDecider { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { - UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + final Decision decision; if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings()); @@ -62,16 +63,21 @@ // if we are called via the _reroute API we ignore the failure counter and try to allocate // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is // enough to manually retry. - return allocation.decision(Decision.YES, NAME, "shard has already failed allocating [" + decision = allocation.decision(Decision.YES, NAME, "shard has already failed allocating [" + unassignedInfo.getNumFailedAllocations() + "] times vs. 
[" + maxRetry + "] retries allowed " + unassignedInfo.toString() + " - retrying once on manual allocation"); } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) { - return allocation.decision(Decision.NO, NAME, "shard has already failed allocating [" + decision = allocation.decision(Decision.NO, NAME, "shard has already failed allocating [" + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed " + unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry"); + } else { + decision = allocation.decision(Decision.YES, NAME, "shard has already failed allocating [" + + unassignedInfo.getNumFailedAllocations() + "] times but [" + maxRetry + "] retries are allowed"); } + } else { + decision = allocation.decision(Decision.YES, NAME, "shard has no previous failures"); } - return allocation.decision(Decision.YES, NAME, "shard has no previous failures"); + return decision; } @Override diff --git a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java b/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java index fcdfaafb1d5..a5dac12fab7 100644 --- a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java +++ b/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java @@ -200,6 +200,10 @@ public abstract class ExtensionPoint { allocationMultibinder.addBinding().to(clazz); } } + + public boolean isEmpty() { + return extensions.isEmpty(); + } } /** diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index b41316b6534..1e41204d64a 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; -import org.elasticsearch.discovery.local.LocalDiscovery; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.ping.ZenPing; @@ -59,11 +58,9 @@ public class DiscoveryModule extends AbstractModule { public DiscoveryModule(Settings settings) { this.settings = settings; - addDiscoveryType("local", LocalDiscovery.class); + addDiscoveryType("none", NoneDiscovery.class); addDiscoveryType("zen", ZenDiscovery.class); addElectMasterService("zen", ElectMasterService.class); - // always add the unicast hosts, or things get angry! 
- addZenPing(UnicastZenPing.class); } /** @@ -113,7 +110,7 @@ public class DiscoveryModule extends AbstractModule { throw new IllegalArgumentException("Unknown Discovery type [" + discoveryType + "]"); } - if (discoveryType.equals("local") == false) { + if (discoveryType.equals("none") == false) { String masterServiceTypeKey = ZEN_MASTER_SERVICE_TYPE_SETTING.get(settings); final Class masterService = masterServiceType.get(masterServiceTypeKey); if (masterService == null) { @@ -130,6 +127,9 @@ public class DiscoveryModule extends AbstractModule { unicastHostProviders.getOrDefault(discoveryType, Collections.emptyList())) { unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider); } + if (zenPings.isEmpty()) { + zenPings.registerExtension(UnicastZenPing.class); + } zenPings.bind(binder()); } bind(Discovery.class).to(discoveryClass).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/discovery/NoneDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/NoneDiscovery.java new file mode 100644 index 00000000000..91b04ce396b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/discovery/NoneDiscovery.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.discovery; + +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.ElectMasterService; + +/** + * A {@link Discovery} implementation that is used by {@link org.elasticsearch.tribe.TribeService}. This implementation + * doesn't support any clustering features. Most notably {@link #startInitialJoin()} does nothing and + * {@link #publish(ClusterChangedEvent, AckListener)} is not supported. 
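+ * For illustration, a node opts into this implementation via the {@code discovery.type} setting, e.g. in
+ * elasticsearch.yml:
+ * <pre>{@code
+ * discovery.type: none
+ * }</pre>
+ * which is what the tribe node configures for its inner client nodes, since no master election is needed there.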
+ */ +public class NoneDiscovery extends AbstractLifecycleComponent implements Discovery { + + private final ClusterService clusterService; + private final DiscoverySettings discoverySettings; + + @Inject + public NoneDiscovery(Settings settings, ClusterService clusterService, ClusterSettings clusterSettings) { + super(settings); + this.clusterService = clusterService; + this.discoverySettings = new DiscoverySettings(settings, clusterSettings); + } + + @Override + public DiscoveryNode localNode() { + return clusterService.localNode(); + } + + @Override + public String nodeDescription() { + return clusterService.getClusterName().value() + "/" + clusterService.localNode().getId(); + } + + @Override + public void setAllocationService(AllocationService allocationService) { + + } + + @Override + public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) { + throw new UnsupportedOperationException(); + } + + @Override + public DiscoveryStats stats() { + return null; + } + + @Override + public DiscoverySettings getDiscoverySettings() { + return discoverySettings; + } + + @Override + public void startInitialJoin() { + + } + + @Override + public int getMinimumMasterNodes() { + return ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); + } + + @Override + protected void doStart() { + + } + + @Override + protected void doStop() { + + } + + @Override + protected void doClose() { + + } +} diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java deleted file mode 100644 index 389f5ee03bb..00000000000 --- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java +++ /dev/null @@ -1,422 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.discovery.local; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.Diff; -import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler; -import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; - -import java.util.HashSet; -import java.util.Optional; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; - -import static org.elasticsearch.cluster.ClusterState.Builder; - -public class LocalDiscovery extends AbstractLifecycleComponent implements Discovery { - - private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0]; - - private final ClusterService clusterService; - private AllocationService allocationService; - private final ClusterName clusterName; - - private final DiscoverySettings discoverySettings; - - private volatile boolean master = false; - - private static final ConcurrentMap clusterGroups = ConcurrentCollections.newConcurrentMap(); - - private volatile ClusterState lastProcessedClusterState; - - @Inject - public LocalDiscovery(Settings settings, ClusterService clusterService, ClusterSettings clusterSettings) { - super(settings); - this.clusterName = clusterService.getClusterName(); - this.clusterService = clusterService; - this.discoverySettings = new DiscoverySettings(settings, clusterSettings); - } - - @Override - public void setAllocationService(AllocationService allocationService) { - this.allocationService = allocationService; - } - - @Override - protected void doStart() { - - } - - @Override - public void startInitialJoin() { - synchronized (clusterGroups) { - ClusterGroup clusterGroup = clusterGroups.get(clusterName); - if (clusterGroup == null) { - clusterGroup = new ClusterGroup(); - clusterGroups.put(clusterName, clusterGroup); - } - logger.debug("Connected to cluster [{}]", clusterName); - - Optional current = clusterGroup.members().stream().filter(other -> ( - other.localNode().equals(this.localNode()) || other.localNode().getId().equals(this.localNode().getId()) - )).findFirst(); - if (current.isPresent()) { - throw new 
IllegalStateException("current cluster group already contains a node with the same id. current " - + current.get().localNode() + ", this node " + localNode()); - } - - clusterGroup.members().add(this); - - LocalDiscovery firstMaster = null; - for (LocalDiscovery localDiscovery : clusterGroup.members()) { - if (localDiscovery.localNode().isMasterNode()) { - firstMaster = localDiscovery; - break; - } - } - - if (firstMaster != null && firstMaster.equals(this)) { - // we are the first master (and the master) - master = true; - final LocalDiscovery master = firstMaster; - clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ClusterStateUpdateTask() { - - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); - for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.add(discovery.localNode()); - } - nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); - // remove the NO_MASTER block in this case - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock()); - return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - } - }); - } else if (firstMaster != null) { - // tell the master to send the fact that we are here - final LocalDiscovery master = firstMaster; - firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode() + "])", new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); - for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.add(discovery.localNode()); - } - nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); - currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build(); - return master.allocationService.reroute(currentState, "node_add"); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - } - - }); - } - } // else, no master node, the next node that will start will fill things in... 
- } - - @Override - protected void doStop() { - synchronized (clusterGroups) { - ClusterGroup clusterGroup = clusterGroups.get(clusterName); - if (clusterGroup == null) { - logger.warn("Illegal state, should not have an empty cluster group when stopping, I should be there at teh very least..."); - return; - } - clusterGroup.members().remove(this); - if (clusterGroup.members().isEmpty()) { - // no more members, remove and return - clusterGroups.remove(clusterName); - return; - } - - LocalDiscovery firstMaster = null; - for (LocalDiscovery localDiscovery : clusterGroup.members()) { - if (localDiscovery.localNode().isMasterNode()) { - firstMaster = localDiscovery; - break; - } - } - - if (firstMaster != null) { - // if the removed node is the master, make the next one as the master - if (master) { - firstMaster.master = true; - } - - final Set newMembers = new HashSet<>(); - for (LocalDiscovery discovery : clusterGroup.members()) { - newMembers.add(discovery.localNode().getId()); - } - - final LocalDiscovery master = firstMaster; - master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().getId()); - DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes()); - if (delta.added()) { - logger.warn("No new nodes should be created when a new discovery view is accepted"); - } - // reroute here, so we eagerly remove dead nodes from the routing - ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build(); - return master.allocationService.deassociateDeadNodes(updatedState, true, "node stopped"); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - } - }); - } - } - } - - @Override - protected void doClose() { - } - - @Override - public DiscoveryNode localNode() { - return clusterService.localNode(); - } - - @Override - public String nodeDescription() { - return clusterName.value() + "/" + localNode().getId(); - } - - @Override - public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { - if (!master) { - throw new IllegalStateException("Shouldn't publish state when not master"); - } - LocalDiscovery[] members = members(); - if (members.length > 0) { - Set nodesToPublishTo = new HashSet<>(members.length); - for (LocalDiscovery localDiscovery : members) { - if (localDiscovery.master) { - continue; - } - nodesToPublishTo.add(localDiscovery.localNode()); - } - publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); - } - } - - @Override - public DiscoveryStats stats() { - return new DiscoveryStats((PendingClusterStateStats)null); - } - - @Override - public DiscoverySettings getDiscoverySettings() { - return discoverySettings; - } - - @Override - public int getMinimumMasterNodes() { - return -1; - } - - private LocalDiscovery[] members() { - ClusterGroup clusterGroup = clusterGroups.get(clusterName); - if (clusterGroup == null) { - return NO_MEMBERS; - } - Queue members = clusterGroup.members(); - return members.toArray(new LocalDiscovery[members.size()]); - } - - private void publish(LocalDiscovery[] members, ClusterChangedEvent clusterChangedEvent, final 
BlockingClusterStatePublishResponseHandler publishResponseHandler) { - - try { - // we do the marshaling intentionally, to check it works well... - byte[] clusterStateBytes = null; - byte[] clusterStateDiffBytes = null; - - ClusterState clusterState = clusterChangedEvent.state(); - for (final LocalDiscovery discovery : members) { - if (discovery.master) { - continue; - } - ClusterState newNodeSpecificClusterState = null; - synchronized (this) { - // we do the marshaling intentionally, to check it works well... - // check if we published cluster state at least once and node was in the cluster when we published cluster state the last time - if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode())) { - // both conditions are true - which means we can try sending cluster state as diffs - if (clusterStateDiffBytes == null) { - Diff diff = clusterState.diff(clusterChangedEvent.previousState()); - BytesStreamOutput os = new BytesStreamOutput(); - diff.writeTo(os); - clusterStateDiffBytes = BytesReference.toBytes(os.bytes()); - } - try { - newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState); - logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName()); - } catch (IncompatibleClusterStateVersionException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex); - } - } - if (newNodeSpecificClusterState == null) { - if (clusterStateBytes == null) { - clusterStateBytes = Builder.toBytes(clusterState); - } - newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode()); - } - discovery.lastProcessedClusterState = newNodeSpecificClusterState; - } - final ClusterState nodeSpecificClusterState = newNodeSpecificClusterState; - - nodeSpecificClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); - // ignore cluster state messages that do not include "me", not in the game yet... 
- if (nodeSpecificClusterState.nodes().getLocalNode() != null) { - assert nodeSpecificClusterState.nodes().getMasterNode() != null : "received a cluster state without a master"; - assert !nodeSpecificClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block"; - - discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - if (currentState.supersedes(nodeSpecificClusterState)) { - return currentState; - } - - if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) { - // its a fresh update from the master as we transition from a start of not having a master to having one - logger.debug("got first state from fresh master [{}]", nodeSpecificClusterState.nodes().getMasterNodeId()); - return nodeSpecificClusterState; - } - - ClusterState.Builder builder = ClusterState.builder(nodeSpecificClusterState); - // if the routing table did not change, use the original one - if (nodeSpecificClusterState.routingTable().version() == currentState.routingTable().version()) { - builder.routingTable(currentState.routingTable()); - } - if (nodeSpecificClusterState.metaData().version() == currentState.metaData().version()) { - builder.metaData(currentState.metaData()); - } - - return builder.build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - publishResponseHandler.onFailure(discovery.localNode(), e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - publishResponseHandler.onResponse(discovery.localNode()); - } - }); - } else { - publishResponseHandler.onResponse(discovery.localNode()); - } - } - - TimeValue publishTimeout = discoverySettings.getPublishTimeout(); - if (publishTimeout.millis() > 0) { - try { - boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout); - if (!awaited) { - DiscoveryNode[] pendingNodes = publishResponseHandler.pendingNodes(); - // everyone may have just responded - if (pendingNodes.length > 0) { - logger.warn("timed out waiting for all nodes to process published state [{}] (timeout [{}], pending nodes: {})", clusterState.version(), publishTimeout, pendingNodes); - } - } - } catch (InterruptedException e) { - // ignore & restore interrupt - Thread.currentThread().interrupt(); - } - } - - - } catch (Exception e) { - // failure to marshal or un-marshal - throw new IllegalStateException("Cluster state failed to serialize", e); - } - } - - private class ClusterGroup { - - private Queue members = ConcurrentCollections.newQueue(); - - Queue members() { - return members; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index c2479363840..2a173c755fe 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -100,8 +100,10 @@ public class PublishClusterStateAction extends AbstractComponent { this.discoverySettings = discoverySettings; this.clusterName = clusterName; this.pendingStatesQueue = new 
PendingClusterStatesQueue(logger, settings.getAsInt(SETTINGS_MAX_PENDING_CLUSTER_STATES, 25)); - transportService.registerRequestHandler(SEND_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.SAME, new SendClusterStateRequestHandler()); - transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, new CommitClusterStateRequestHandler()); + transportService.registerRequestHandler(SEND_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.SAME, false, false, + new SendClusterStateRequestHandler()); + transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, false, false, + new CommitClusterStateRequestHandler()); } public PendingClusterStatesQueue pendingStatesQueue() { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 3991a37a8bf..403a1290546 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -291,4 +291,38 @@ public interface ScriptDocValues<T> extends List<T> { return geohashDistance(geohash); } } + + final class Booleans extends AbstractList<Boolean> implements ScriptDocValues<Boolean> { + + private final SortedNumericDocValues values; + + public Booleans(SortedNumericDocValues values) { + this.values = values; + } + + @Override + public void setNextDocId(int docId) { + values.setDocument(docId); + } + + @Override + public List<Boolean> getValues() { + return this; + } + + public boolean getValue() { + return values.count() != 0 && values.valueAt(0) == 1; + } + + @Override + public Boolean get(int index) { + return values.valueAt(index) == 1; + } + + @Override + public int size() { + return values.count(); + } + + } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java index b3b0604e9e2..c52ccb90bed 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java @@ -19,28 +19,24 @@ package org.elasticsearch.index.fielddata.plain; -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.util.Accountable; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import java.util.Collection; -import java.util.Collections; - - /** * Specialization of {@link AtomicNumericFieldData} for integers. */ abstract class AtomicLongFieldData implements AtomicNumericFieldData { private final long ramBytesUsed; + /** True if this numeric data is for a boolean field, and so only has values 0 and 1. 
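+ * When set, {@link #getScriptValues()} exposes the same underlying longs as {@code ScriptDocValues.Booleans},
+ * letting scripts read the field as a real boolean, e.g. {@code boolean b = doc['my_bool'].value;}
+ * (the field name here is illustrative only).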
*/ + private final boolean isBoolean; - AtomicLongFieldData(long ramBytesUsed) { + AtomicLongFieldData(long ramBytesUsed, boolean isBoolean) { this.ramBytesUsed = ramBytesUsed; + this.isBoolean = isBoolean; } @Override @@ -50,7 +46,11 @@ abstract class AtomicLongFieldData implements AtomicNumericFieldData { @Override public final ScriptDocValues getScriptValues() { - return new ScriptDocValues.Longs(getLongValues()); + if (isBoolean) { + return new ScriptDocValues.Booleans(getLongValues()); + } else { + return new ScriptDocValues.Longs(getLongValues()); + } } @Override @@ -63,24 +63,6 @@ abstract class AtomicLongFieldData implements AtomicNumericFieldData { return FieldData.castToDouble(getLongValues()); } - public static AtomicNumericFieldData empty(final int maxDoc) { - return new AtomicLongFieldData(0) { - - @Override - public SortedNumericDocValues getLongValues() { - return DocValues.emptySortedNumeric(maxDoc); - } - - @Override - public Collection getChildResources() { - return Collections.emptyList(); - } - - }; - } - @Override - public void close() { - } - + public void close() {} } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java index be877b9c68a..cf1fccabee0 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java @@ -96,7 +96,7 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple case DOUBLE: return new SortedNumericDoubleFieldData(reader, field); default: - return new SortedNumericLongFieldData(reader, field); + return new SortedNumericLongFieldData(reader, field, numericType == NumericType.BOOLEAN); } } @@ -117,8 +117,8 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple final LeafReader reader; final String field; - SortedNumericLongFieldData(LeafReader reader, String field) { - super(0L); + SortedNumericLongFieldData(LeafReader reader, String field, boolean isBoolean) { + super(0L, isBoolean); this.reader = reader; this.field = field; } diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index ac21256acaa..a0ed868e51c 100644 --- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -269,6 +269,7 @@ class InstallPluginCommand extends SettingCommand { URL url = new URL(urlString); Path zip = Files.createTempFile(tmpDir, null, ".zip"); URLConnection urlConnection = url.openConnection(); + urlConnection.addRequestProperty("User-Agent", "elasticsearch-plugin-installer"); int contentLength = urlConnection.getContentLength(); try (InputStream in = new TerminalProgressInputStream(urlConnection.getInputStream(), contentLength, terminal)) { // must overwrite since creating the temp file above actually created the file diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 5eb6bf2fea7..18bb8770f7d 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -114,14 +114,14 @@ public class TransportService extends 
AbstractLifecycleComponent { private final Logger tracerLog; volatile String[] tracerLogInclude; - volatile String[] tracelLogExclude; + volatile String[] tracerLogExclude; /** if set, requests sent to this id will shortcut and be executed locally */ volatile DiscoveryNode localNode = null; /** * Build the service. - + * @param clusterSettings if non null the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. */ @@ -173,8 +173,8 @@ this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); } - void setTracerLogExclude(List<String> tracelLogExclude) { - this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY); + void setTracerLogExclude(List<String> tracerLogExclude) { + this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY); } @Override @@ -589,8 +589,8 @@ return false; } } - if (tracelLogExclude.length > 0) { - return !Regex.simpleMatch(tracelLogExclude, action); + if (tracerLogExclude.length > 0) { + return !Regex.simpleMatch(tracerLogExclude, action); } return true; } diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index fd697340cd7..7871a0a6f39 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -124,7 +124,7 @@ public class TribeService extends AbstractLifecycleComponent { if (!NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.exists(settings)) { sb.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), nodesSettings.size()); } - sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery + sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "none"); // a tribe node should not use zen discovery // nothing is going to be discovered, since no master will be elected sb.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); if (sb.get("cluster.name") == null) { diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 8ba5dc4868f..cc2f000fbae 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -97,40 +97,23 @@ public class VersionTests extends ESTestCase { } public void testTooLongVersionFromString() { - try { - Version.fromString("1.0.0.1.3"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } + Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("1.0.0.1.3")); + assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); } public void testTooShortVersionFromString() { - try { - Version.fromString("1.0"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } - + Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("1.0")); + assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); } public void 
testWrongVersionFromString() { - try { - Version.fromString("WRONG.VERSION"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } + Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("WRONG.VERSION")); + assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); } public void testVersionNoPresentInSettings() { - try { - Version.indexCreated(Settings.builder().build()); - fail("Expected IllegalArgumentException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), containsString("[index.version.created] is not present")); - } + Exception e = expectThrows(IllegalStateException.class, () -> Version.indexCreated(Settings.builder().build())); + assertThat(e.getMessage(), containsString("[index.version.created] is not present")); } public void testIndexCreatedVersion() { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java new file mode 100644 index 00000000000..69c6731aa15 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.template; + +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; + +/** + * Rudimentary tests that the templates used by Logstash and Beats + * prior to their 5.x releases work for newly created indices + */ +public class BWCTemplateTests extends ESSingleNodeTestCase { + public void testBeatsTemplatesBWC() throws Exception { + String metricBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json"); + String packetBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json"); + String fileBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json"); + String winLogBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json"); + client().admin().indices().preparePutTemplate("metricbeat").setSource(metricBeat).get(); + client().admin().indices().preparePutTemplate("packetbeat").setSource(packetBeat).get(); + client().admin().indices().preparePutTemplate("filebeat").setSource(fileBeat).get(); + client().admin().indices().preparePutTemplate("winlogbeat").setSource(winLogBeat).get(); + + client().prepareIndex("metricbeat-foo", "doc", "1").setSource("message", "foo").get(); + client().prepareIndex("packetbeat-foo", "doc", "1").setSource("message", "foo").get(); + client().prepareIndex("filebeat-foo", "doc", "1").setSource("message", "foo").get(); + client().prepareIndex("winlogbeat-foo", "doc", "1").setSource("message", "foo").get(); + } + + public void testLogstashTemplatesBWC() throws Exception { + String ls5x = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json"); + client().admin().indices().preparePutTemplate("logstash-5x").setSource(ls5x).get(); + client().prepareIndex("logstash-foo", "doc", "1").setSource("message", "foo").get(); + } + +} diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 29dd2a150a3..38328a80054 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.support.master; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.fd.FaultDetection; @@ -46,6 +45,11 @@ import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class IndexingMasterFailoverIT extends ESIntegTestCase { + @Override + protected boolean addMockZenPings() { + return false; + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { final HashSet<Class<? extends Plugin>> classes = new HashSet<>(super.nodePlugins()); @@ -62,7 +66,6 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase { logger.info("--> start 4 nodes, 3 master, 1 data"); final Settings sharedSettings = Settings.builder() - 
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen") .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but not so long that the test times out diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 61ce8eaf251..96e90cedf7f 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -137,6 +137,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { THREAD_POOL = new TestThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName()); } + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -156,6 +157,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { ); } + @Override @After public void tearDown() throws Exception { super.tearDown(); } diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index de0705dcab2..4bbe0b01850 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -226,12 +226,8 @@ public class UpdateRequestTests extends ESTestCase { // Related to issue #15822 public void testInvalidBodyThrowsParseException() throws Exception { UpdateRequest request = new UpdateRequest("test", "type", "1"); - try { - request.fromXContent(new byte[] { (byte) '"' }); - fail("Should have thrown a ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("Failed to derive xcontent")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> request.fromXContent(new byte[] { (byte) '"' })); + assertThat(e.getMessage(), equalTo("Failed to derive xcontent")); } // Related to issue 15338 diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java index 93c5e29208c..dbb066dcb1b 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java @@ -32,12 +32,13 @@ import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; +import java.util.Arrays; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -65,7 +66,7 @@ public class TransportClientIT extends ESIntegTestCase { 
.put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false) .put("cluster.name", "foobar") - .build(), Collections.singleton(MockTcpTransportPlugin.class)).start()) { + .build(), Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) { TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); client.addTransportAddress(transportAddress); // since we force transport clients there has to be one node started that we connect to. diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index b8e95343932..83472a1edc4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -75,15 +74,13 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { } @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); + protected boolean addMockZenPings() { + return false; } public void testSimpleMinimumMasterNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") .put("discovery.initial_state_timeout", "500ms") @@ -195,7 +192,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testMultipleNodesShutdownNonMasterNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put("discovery.zen.minimum_master_nodes", 3) .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s") .put("discovery.initial_state_timeout", "500ms") @@ -271,7 +267,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testDynamicUpdateMinimumMasterNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms") .put("discovery.initial_state_timeout", "500ms") .build(); @@ -329,7 +324,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testCanNotBringClusterDown() throws ExecutionException, InterruptedException { int nodeCount = scaledRandomIntBetween(1, 5); Settings.Builder settings = Settings.builder() - .put("discovery.type", "zen") .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") .put("discovery.initial_state_timeout", "500ms"); @@ -368,7 +362,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testCanNotPublishWithoutMinMastNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up diff --git 
a/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 4d62f9b664f..c033ad7ff27 100644 --- a/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.node.Node; @@ -41,18 +40,9 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class SpecificMasterNodesIT extends ESIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); - } - protected final Settings.Builder settingsBuilder() { - return Settings.builder().put("discovery.type", "zen"); - } - public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); try { assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); @@ -60,7 +50,7 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { // all is well, no master elected } logger.info("--> start master node"); - final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); @@ -75,14 +65,14 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { } logger.info("--> start master node"); - final String nextMasterEligibleNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String nextMasterEligibleNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); 
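// the master client should agree with the non-master client on which node was elected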
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } public void testElectOnlyBetweenMasterNodes() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); try { assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); @@ -90,12 +80,12 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { // all is well, no master elected } logger.info("--> start master node (1)"); - final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> start master node (2)"); - final String nextMasterEligableNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String nextMasterEligableNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); @@ -112,10 +102,10 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { */ public void testCustomDefaultMapping() throws Exception { logger.info("--> start master node / non data"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); createIndex("test"); 
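// the "_default_" mapping acts as a template that is merged into the mappings of types created afterwards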
assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("timestamp", "type=date")); @@ -134,10 +124,10 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { public void testAliasFilterValidation() throws Exception { logger.info("--> start master node / non data"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"keyword\" },\"field_b\" :{ \"type\" : \"keyword\" }}}}}}")); client().admin().indices().prepareAliases().addAlias("test", "a_test", QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"), ScoreMode.Avg)).get(); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java index 0215f94d4da..e47ca2184f8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java @@ -177,43 +177,31 @@ public class DateMathExpressionResolverTests extends ESTestCase { } public void testExpressionInvalidUnescaped() throws Exception { - try { - expressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("invalid dynamic name expression")); - assertThat(e.getMessage(), containsString("invalid character at position [")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () -> expressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>"))); + assertThat(e.getMessage(), containsString("invalid dynamic name expression")); + assertThat(e.getMessage(), containsString("invalid character at position [")); } public void testExpressionInvalidDateMathFormat() throws Exception { - try { - expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("invalid dynamic name expression")); - assertThat(e.getMessage(), containsString("date math placeholder is open ended")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>"))); + assertThat(e.getMessage(), containsString("invalid dynamic name expression")); + assertThat(e.getMessage(), containsString("date math placeholder is open ended")); } public void testExpressionInvalidEmptyDateMathFormat() throws Exception { - try { - expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException 
e) { - assertThat(e.getMessage(), containsString("invalid dynamic name expression")); - assertThat(e.getMessage(), containsString("missing date format")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>"))); + assertThat(e.getMessage(), containsString("invalid dynamic name expression")); + assertThat(e.getMessage(), containsString("missing date format")); } public void testExpressionInvalidOpenEnded() throws Exception { - try { - expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("invalid dynamic name expression")); - assertThat(e.getMessage(), containsString("date math placeholder is open ended")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>"))); + assertThat(e.getMessage(), containsString("invalid dynamic name expression")); + assertThat(e.getMessage(), containsString("date math placeholder is open ended")); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 63b9dc05e7a..b68f3735c0a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -305,7 +305,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { assertEquals(1, results.length); assertEquals("bar", results[0]); - results = indexNameExpressionResolver.concreteIndexNames(context, "-foo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "*", "-foo*"); assertEquals(1, results.length); assertEquals("bar", results[0]); @@ -585,6 +585,64 @@ public class IndexNameExpressionResolverTests extends ESTestCase { assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); } + public void testConcreteIndicesWildcardWithNegation() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX").state(State.OPEN)) + .put(indexBuilder("testXXY").state(State.OPEN)) + .put(indexBuilder("testXYY").state(State.OPEN)) + .put(indexBuilder("-testXYZ").state(State.OPEN)) + .put(indexBuilder("-testXZZ").state(State.OPEN)) + .put(indexBuilder("-testYYY").state(State.OPEN)) + .put(indexBuilder("testYYY").state(State.OPEN)) + .put(indexBuilder("testYYX").state(State.OPEN)); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, + IndicesOptions.fromOptions(true, true, true, true)); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), + equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "test*", "-testX*")), + equalTo(newHashSet("testYYY", "testYYX"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testX*")), + equalTo(newHashSet("-testXYZ", "-testXZZ"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXY", "-testX*")), + 
equalTo(newHashSet("testXXY", "-testXYZ", "-testXZZ"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "*", "--testX*")), + equalTo(newHashSet("testXXX", "testXXY", "testXYY", "testYYX", "testYYY", "-testYYY"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testXXX", "test*")), + equalTo(newHashSet("testYYX", "testXXX", "testXYY", "testYYY", "testXXY"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "test*", "-testXXX")), + equalTo(newHashSet("testYYX", "testXYY", "testYYY", "testXXY"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "+testXXX", "+testXXY", "+testYYY", "-testYYY")), + equalTo(newHashSet("testXXX", "testXXY", "testYYY", "-testYYY"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testYYY", "testYYX", "testX*", "-testXXX")), + equalTo(newHashSet("testYYY", "testYYX", "testXXY", "testXYY"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testXXX", "*testY*", "-testYYY")), + equalTo(newHashSet("testYYX", "testYYY", "-testYYY"))); + + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "-doesnotexist"); + assertEquals(0, indexNames.length); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "-*")), + equalTo(newHashSet("-testXYZ", "-testXZZ", "-testYYY"))); + + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), + "+testXXX", "+testXXY", "+testXYY", "-testXXY")), + equalTo(newHashSet("testXXX", "testXYY", "testXXY"))); + + indexNames = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "*", "-*"); + assertEquals(0, indexNames.length); + } + /** * test resolving _all pattern (null, empty array or "_all") for random IndicesOptions */ diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index 01110e796e8..bac9a681341 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -50,9 +50,9 @@ public class WildcardExpressionResolverTests extends ESTestCase { assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))).size(), equalTo(0)); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), equalTo(newHashSet("testXXX", "-testXXX"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testY*"))), equalTo(newHashSet("testXXX", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))).size(), equalTo(0)); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), equalTo(newHashSet("testXXX"))); } public void 
testConvertWildcardsTests() { @@ -66,7 +66,7 @@ public class WildcardExpressionResolverTests extends ESTestCase { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("-kuku"))), equalTo(newHashSet("-kuku"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+test*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testX*", "+testYYY"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testYYY", "+testX*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 9f21d133264..6243f138380 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimary import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -63,9 +62,8 @@ public class PrimaryAllocationIT extends ESIntegTestCase { } @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); + protected boolean addMockZenPings() { + return false; } private void createStaleReplicaScenario() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index a25e3abe65d..111b84f981a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Singleton; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -68,17 +67,8 @@ public class ClusterServiceIT extends ESIntegTestCase { return Arrays.asList(TestPlugin.class); } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); - } - public void testAckedUpdateTask() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + 
internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -151,10 +141,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } public void testAckedUpdateTaskSameClusterState() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -222,10 +209,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } public void testAckedUpdateTaskNoAckExpected() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -294,10 +278,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } public void testAckedUpdateTaskTimeoutZero() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -371,11 +352,8 @@ public class ClusterServiceIT extends ESIntegTestCase { @TestLogging("_root:debug,org.elasticsearch.action.admin.cluster.tasks:trace") public void testPendingUpdateTask() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - String node_0 = internalCluster().startNode(settings); - internalCluster().startCoordinatingOnlyNode(settings); + String node_0 = internalCluster().startNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0); final CountDownLatch block1 = new CountDownLatch(1); @@ -507,7 +485,6 @@ public class ClusterServiceIT extends ESIntegTestCase { public void testLocalNodeMasterListenerCallbacks() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put("discovery.zen.minimum_master_nodes", 1) .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms") .put("discovery.initial_state_timeout", "500ms") diff --git a/core/src/test/java/org/elasticsearch/common/TableTests.java b/core/src/test/java/org/elasticsearch/common/TableTests.java index 39b5801d7ff..c5e3c34910b 100644 --- a/core/src/test/java/org/elasticsearch/common/TableTests.java +++ b/core/src/test/java/org/elasticsearch/common/TableTests.java @@ -28,71 +28,43 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class TableTests extends ESTestCase { + public void testFailOnStartRowWithoutHeader() { Table table = new Table(); - try { - table.startRow(); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no headers added...")); - } + Exception e = expectThrows(IllegalStateException.class, () -> table.startRow()); + assertThat(e.getMessage(), is("no headers added...")); } public void testFailOnEndHeadersWithoutStart() { Table table = new Table(); - try { - table.endHeaders(); - fail("Expected 
IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no headers added...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.endHeaders()); + assertThat(e.getMessage(), is("no headers added...")); } public void testFailOnAddCellWithoutHeader() { Table table = new Table(); - try { - table.addCell("error"); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no block started...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("error")); + assertThat(e.getMessage(), is("no block started...")); } public void testFailOnAddCellWithoutRow() { Table table = this.getTableWithHeaders(); - try { - table.addCell("error"); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no block started...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("error")); + assertThat(e.getMessage(), is("no block started...")); } public void testFailOnEndRowWithoutStart() { Table table = this.getTableWithHeaders(); - try { - table.endRow(); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no row started...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.endRow()); + assertThat(e.getMessage(), is("no row started...")); } public void testFailOnLessCellsThanDeclared() { Table table = this.getTableWithHeaders(); table.startRow(); table.addCell("foo"); - try { - table.endRow(true); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("mismatch on number of cells 1 in a row compared to header 2")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.endRow()); + assertThat(e.getMessage(), is("mismatch on number of cells 1 in a row compared to header 2")); } public void testOnLessCellsThanDeclaredUnchecked() { @@ -107,13 +79,8 @@ public class TableTests extends ESTestCase { table.startRow(); table.addCell("foo"); table.addCell("bar"); - try { - table.addCell("foobar"); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("can't add more cells to a row than the header")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("foobar")); + assertThat(e.getMessage(), is("can't add more cells to a row than the header")); } public void testSimple() { diff --git a/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index abbd6ce40aa..dc01a39cb81 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -19,12 +19,6 @@ package org.elasticsearch.common.geo; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Circle; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Rectangle; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.impl.PointImpl; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.LineString; import com.vividsolutions.jts.geom.Polygon; @@ -35,6 +29,12 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder; import 
org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilders; import org.elasticsearch.test.ESTestCase; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Circle; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.impl.PointImpl; import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiLineString; import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiPolygon; @@ -183,17 +183,13 @@ public class ShapeBuilderTests extends ESTestCase { } public void testPolygonSelfIntersection() { - try { - ShapeBuilders.newPolygon(new CoordinatesBuilder() + PolygonBuilder newPolygon = ShapeBuilders.newPolygon(new CoordinatesBuilder() .coordinate(-40.0, 50.0) .coordinate(40.0, 50.0) .coordinate(-40.0, -50.0) - .coordinate(40.0, -50.0).close()) - .build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("Self-intersection at or near point (0.0")); - } + .coordinate(40.0, -50.0).close()); + Exception e = expectThrows(InvalidShapeException.class, () -> newPolygon.build()); + assertThat(e.getMessage(), containsString("Self-intersection at or near point (0.0")); } public void testGeoCircle() { @@ -550,12 +546,8 @@ public class ShapeBuilderTests extends ESTestCase { .coordinate(179, -10) .coordinate(164, 0) )); - try { - builder.close().build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - } + Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); + assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); } public void testBoundaryShapeWithTangentialHole() { @@ -602,12 +594,8 @@ public class ShapeBuilderTests extends ESTestCase { .coordinate(176, -10) .coordinate(-177, 10) )); - try { - builder.close().build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - } + Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); + assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); } /** @@ -659,11 +647,7 @@ public class ShapeBuilderTests extends ESTestCase { .coordinate(-176, 4) .coordinate(180, 0) ); - try { - builder.close().build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); - } + Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); + assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); } } diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index f1e6c1ba67e..44763d5ccce 100644 --- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -97,14 +97,7 @@ public class BytesStreamsTests 
extends ESTestCase { BytesStreamOutput out = new BytesStreamOutput(); // bulk-write with wrong args - try { - out.writeBytes(new byte[]{}, 0, 1); - fail("expected IllegalArgumentException: length > (size-offset)"); - } - catch (IllegalArgumentException iax1) { - // expected - } - + expectThrows(IllegalArgumentException.class, () -> out.writeBytes(new byte[]{}, 0, 1)); out.close(); } @@ -333,18 +326,21 @@ public class BytesStreamsTests extends ESTestCase { } public void testNamedWriteable() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( - new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new) - )); - TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); - out.writeNamedWriteable(namedWriteableIn); - byte[] bytes = BytesReference.toBytes(out.bytes()); - StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry); - assertEquals(in.available(), bytes.length); - BaseNamedWriteable namedWriteableOut = in.readNamedWriteable(BaseNamedWriteable.class); - assertEquals(namedWriteableIn, namedWriteableOut); - assertEquals(0, in.available()); + try (BytesStreamOutput out = new BytesStreamOutput()) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( + new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new))); + TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + out.writeNamedWriteable(namedWriteableIn); + byte[] bytes = BytesReference.toBytes(out.bytes()); + + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { + assertEquals(in.available(), bytes.length); + BaseNamedWriteable namedWriteableOut = in.readNamedWriteable(BaseNamedWriteable.class); + assertEquals(namedWriteableIn, namedWriteableOut); + assertEquals(0, in.available()); + } + } } public void testNamedWriteableList() throws IOException { @@ -367,59 +363,61 @@ public class BytesStreamsTests extends ESTestCase { } public void testNamedWriteableNotSupportedWithoutWrapping() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - TestNamedWriteable testNamedWriteable = new TestNamedWriteable("test1", "test2"); - out.writeNamedWriteable(testNamedWriteable); - StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - try { - in.readNamedWriteable(BaseNamedWriteable.class); - fail("Expected UnsupportedOperationException"); - } catch (UnsupportedOperationException e) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + TestNamedWriteable testNamedWriteable = new TestNamedWriteable("test1", "test2"); + out.writeNamedWriteable(testNamedWriteable); + StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); + Exception e = expectThrows(UnsupportedOperationException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); assertThat(e.getMessage(), is("can't read named writeable from StreamInput")); } } public void testNamedWriteableReaderReturnsNull() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( - new 
NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null) - )); - TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); - out.writeNamedWriteable(namedWriteableIn); - byte[] bytes = BytesReference.toBytes(out.bytes()); - StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry); - assertEquals(in.available(), bytes.length); - IOException e = expectThrows(IOException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); - assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + try (BytesStreamOutput out = new BytesStreamOutput()) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( + new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null))); + TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + out.writeNamedWriteable(namedWriteableIn); + byte[] bytes = BytesReference.toBytes(out.bytes()); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { + assertEquals(in.available(), bytes.length); + IOException e = expectThrows(IOException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); + assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + } + } } public void testOptionalWriteableReaderReturnsNull() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - out.writeOptionalWriteable(new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10))); - StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - IOException e = expectThrows(IOException.class, () -> in.readOptionalWriteable((StreamInput ignored) -> null)); - assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeOptionalWriteable(new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10))); + StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); + IOException e = expectThrows(IOException.class, () -> in.readOptionalWriteable((StreamInput ignored) -> null)); + assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + } } public void testWriteableReaderReturnsWrongName() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( - new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> - new TestNamedWriteable(in) { - @Override - public String getWriteableName() { - return "intentionally-broken"; - } - }) - )); - TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); - out.writeNamedWriteable(namedWriteableIn); - byte[] bytes = BytesReference.toBytes(out.bytes()); - StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry); - assertEquals(in.available(), bytes.length); - 
AssertionError e = expectThrows(AssertionError.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); - assertThat(e.getMessage(), - endsWith(" claims to have a different name [intentionally-broken] than it was read from [test-named-writeable].")); + try (BytesStreamOutput out = new BytesStreamOutput()) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( + Collections.singletonList(new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, + (StreamInput in) -> new TestNamedWriteable(in) { + @Override + public String getWriteableName() { + return "intentionally-broken"; + } + }))); + TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + out.writeNamedWriteable(namedWriteableIn); + byte[] bytes = BytesReference.toBytes(out.bytes()); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { + assertEquals(in.available(), bytes.length); + AssertionError e = expectThrows(AssertionError.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); + assertThat(e.getMessage(), + endsWith(" claims to have a different name [intentionally-broken] than it was read from [test-named-writeable].")); + } + } } public void testWriteStreamableList() throws IOException { @@ -551,32 +549,13 @@ public class BytesStreamsTests extends ESTestCase { assertEquals(-1, out.position()); // writing a single byte must fail - try { - out.writeByte((byte)0); - fail("expected IllegalStateException: stream closed"); - } - catch (IllegalStateException iex1) { - // expected - } + expectThrows(IllegalStateException.class, () -> out.writeByte((byte)0)); // writing in bulk must fail - try { - out.writeBytes(new byte[0], 0, 0); - fail("expected IllegalStateException: stream closed"); - } - catch (IllegalStateException iex1) { - // expected - } + expectThrows(IllegalStateException.class, () -> out.writeBytes(new byte[0], 0, 0)); // toByteArray() must fail - try { - BytesReference.toBytes(out.bytes()); - fail("expected IllegalStateException: stream closed"); - } - catch (IllegalStateException iex1) { - // expected - } - + expectThrows(IllegalStateException.class, () -> BytesReference.toBytes(out.bytes())); } // create & fill byte[] with randomized data @@ -587,16 +566,15 @@ public class BytesStreamsTests extends ESTestCase { } public void testReadWriteGeoPoint() throws IOException { - { - BytesStreamOutput out = new BytesStreamOutput(); + try (BytesStreamOutput out = new BytesStreamOutput()) { GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble()); out.writeGenericValue(geoPoint); StreamInput wrap = out.bytes().streamInput(); GeoPoint point = (GeoPoint) wrap.readGenericValue(); assertEquals(point, geoPoint); } - { - BytesStreamOutput out = new BytesStreamOutput(); + + try (BytesStreamOutput out = new BytesStreamOutput()) { GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble()); out.writeGeoPoint(geoPoint); StreamInput wrap = out.bytes().streamInput(); @@ -640,12 +618,12 @@ public class BytesStreamsTests extends ESTestCase { assertNotEquals(mapKeys, reverseMapKeys); - BytesStreamOutput output = new BytesStreamOutput(); - BytesStreamOutput reverseMapOutput = new BytesStreamOutput(); - output.writeMapWithConsistentOrder(map); - reverseMapOutput.writeMapWithConsistentOrder(reverseMap); + try (BytesStreamOutput output = new BytesStreamOutput(); BytesStreamOutput reverseMapOutput = new BytesStreamOutput()) { +
output.writeMapWithConsistentOrder(map); + reverseMapOutput.writeMapWithConsistentOrder(reverseMap); - assertEquals(output.bytes(), reverseMapOutput.bytes()); + assertEquals(output.bytes(), reverseMapOutput.bytes()); + } } public void testReadMapByUsingWriteMapWithConsistentOrder() throws IOException { @@ -653,18 +631,20 @@ public class BytesStreamsTests extends ESTestCase { randomMap(new HashMap<>(), randomIntBetween(2, 20), () -> randomAsciiOfLength(5), () -> randomAsciiOfLength(5)); - BytesStreamOutput streamOut = new BytesStreamOutput(); - streamOut.writeMapWithConsistentOrder(streamOutMap); - StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes())); - Map<String, Object> streamInMap = in.readMap(); - assertEquals(streamOutMap, streamInMap); + try (BytesStreamOutput streamOut = new BytesStreamOutput()) { + streamOut.writeMapWithConsistentOrder(streamOutMap); + StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes())); + Map<String, Object> streamInMap = in.readMap(); + assertEquals(streamOutMap, streamInMap); + } } public void testWriteMapWithConsistentOrderWithLinkedHashMapShouldThrowAssertError() throws IOException { - BytesStreamOutput output = new BytesStreamOutput(); - Map<String, String> map = new LinkedHashMap<>(); - Throwable e = expectThrows(AssertionError.class, () -> output.writeMapWithConsistentOrder(map)); - assertEquals(AssertionError.class, e.getClass()); + try (BytesStreamOutput output = new BytesStreamOutput()) { + Map<String, String> map = new LinkedHashMap<>(); + Throwable e = expectThrows(AssertionError.class, () -> output.writeMapWithConsistentOrder(map)); + assertEquals(AssertionError.class, e.getClass()); + } } private static <K, V> Map<K, V> randomMap(Map<K, V> map, int size, Supplier<K> keyGenerator, Supplier<V> valueGenerator) { diff --git a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index 814183281ce..6ad5a1ce32a 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -123,48 +123,30 @@ public class ByteSizeValueTests extends ESTestCase { } public void testFailOnMissingUnits() { - try { - ByteSizeValue.parseBytesSizeValue("23", "test"); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [test]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("23", "test")); + assertThat(e.getMessage(), containsString("failed to parse setting [test]")); } public void testFailOnUnknownUnits() { - try { - ByteSizeValue.parseBytesSizeValue("23jw", "test"); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [test]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("23jw", "test")); + assertThat(e.getMessage(), containsString("failed to parse setting [test]")); } public void testFailOnEmptyParsing() { - try { - assertThat(ByteSizeValue.parseBytesSizeValue("", "emptyParsing").toString(), is("23kb")); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [emptyParsing]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () ->
assertThat(ByteSizeValue.parseBytesSizeValue("", "emptyParsing").toString(), is("23kb"))); + assertThat(e.getMessage(), containsString("failed to parse setting [emptyParsing]")); } public void testFailOnEmptyNumberParsing() { - try { - assertThat(ByteSizeValue.parseBytesSizeValue("g", "emptyNumberParsing").toString(), is("23b")); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse [g]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () -> assertThat(ByteSizeValue.parseBytesSizeValue("g", "emptyNumberParsing").toString(), is("23b"))); + assertThat(e.getMessage(), containsString("failed to parse [g]")); } public void testNoDotsAllowed() { - try { - ByteSizeValue.parseBytesSizeValue("42b.", null, "test"); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [test]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("42b.", null, "test")); + assertThat(e.getMessage(), containsString("failed to parse setting [test]")); } public void testCompareEquality() { diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 58a67769730..b400b0c7a5f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -128,6 +128,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { private ClusterDiscoveryConfiguration discoveryConfig; + @Override + protected boolean addMockZenPings() { + return false; + } @Override protected Settings nodeSettings(int nodeOrdinal) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index d336534a158..c99187c7a93 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -24,9 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -38,7 +36,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.membership.MembershipAction; @@ -46,7 +43,6 @@ import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import 
org.elasticsearch.test.TestCustomMetaData; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; @@ -55,7 +51,6 @@ import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; -import org.junit.Before; import java.io.IOException; import java.net.UnknownHostException; @@ -78,32 +73,10 @@ import static org.hamcrest.Matchers.notNullValue; @TestLogging("_root:DEBUG") public class ZenDiscoveryIT extends ESIntegTestCase { - private Version previousMajorVersion; - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); - } - - @Before - public void computePrevMajorVersion() { - Version previousMajor; - // find a GA build whose major version is final List<ClusterState> statesFound = new ArrayList<>(); final CountDownLatch nodesStopped = new CountDownLatch(1); - clusterService.add(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - statesFound.add(event.state()); - try { - // block until both nodes have stopped to accumulate node failures - nodesStopped.await(); - } catch (InterruptedException e) { - //meh - } + clusterService.add(event -> { + statesFound.add(event.state()); + try { + // block until both nodes have stopped to accumulate node failures + nodesStopped.await(); + } catch (InterruptedException e) { + //meh } }); @@ -189,10 +158,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { } public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "zen") - .build(); - List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get(); + List<String> nodeNames = internalCluster().startNodesAsync(2).get(); client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); List<String> nonMasterNodes = new ArrayList<>(nodeNames); @@ -303,10 +269,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { " }\n" + "}"; - Settings nodeSettings = Settings.builder() - .put("discovery.type", "zen") // <-- To override the local setting if set externally - .build(); - internalCluster().startNode(nodeSettings); + internalCluster().startNode(); logger.info("--> request node discovery stats"); NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setDiscovery(true).get(); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index f103a30b999..53b7f95d829 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -87,7 +87,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { currentNodes = DiscoveryNodes.builder(); currentNodes.masterNodeId("b").add(new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)); - ; + // version isn't taken into account, so randomize it to ensure this.
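// Note: a minimal sketch of the invariant this unit test appears to exercise (assuming
// ZenDiscovery.shouldIgnoreOrRejectNewClusterState semantics): a node already following
// master "b" must ignore or reject a published cluster state that names a different
// master, whether the incoming state's version is higher or lower; hence the version is
// randomized rather than fixed below.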
if (randomBoolean()) { currentState.version(2); diff --git a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java deleted file mode 100644 index 91a18f9c053..00000000000 --- a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index; - -import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.fd.FaultDetection; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportService; - -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import static java.util.Collections.singleton; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; -import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; -import static org.hamcrest.Matchers.equalTo; - -/** - * Test failure when index replication actions fail mid-flight - */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) -public class TransportIndexFailuresIT extends ESIntegTestCase { - - private static final Settings nodeSettings = Settings.builder() - .put("discovery.type", "zen") // <-- To override the local setting if set externally - .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly - .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // <-- for hitting simulated network failures quickly - .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly - .put("discovery.zen.minimum_master_nodes", 1) - .build(); - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); - } - - @Override - protected int numberOfShards() { - return 1; - } - - @Override - protected int numberOfReplicas() { - return 1; - } - - public void
testNetworkPartitionDuringReplicaIndexOp() throws Exception { - final String INDEX = "testidx"; - - List<String> nodes = internalCluster().startNodesAsync(2, nodeSettings).get(); - - // Create index test with 1 shard, 1 replica and ensure it is green - createIndex(INDEX); - ensureGreen(INDEX); - - // Disable allocation so the replica cannot be reallocated when it fails - Settings s = Settings.builder().put("cluster.routing.allocation.enable", "none").build(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(s).get(); - - // Determine which node holds the primary shard - ClusterState state = getNodeClusterState(nodes.get(0)); - IndexShardRoutingTable shard = state.getRoutingTable().index(INDEX).shard(0); - String primaryNode; - String replicaNode; - if (shard.getShards().get(0).primary()) { - primaryNode = nodes.get(0); - replicaNode = nodes.get(1); - } else { - primaryNode = nodes.get(1); - replicaNode = nodes.get(0); - } - logger.info("--> primary shard is on {}", primaryNode); - - // Index a document to make sure everything works well - IndexResponse resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "bar").get(); - assertThat("document exists on primary node", - internalCluster().client(primaryNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(), - equalTo(true)); - assertThat("document exists on replica node", - internalCluster().client(replicaNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(), - equalTo(true)); - - // Disrupt the network so indexing requests fail to replicate - logger.info("--> preventing index/replica operations"); - TransportService mockTransportService = internalCluster().getInstance(TransportService.class, primaryNode); - ((MockTransportService) mockTransportService).addFailToSendNoConnectRule( - internalCluster().getInstance(TransportService.class, replicaNode), - singleton(IndexAction.NAME + "[r]") - ); - mockTransportService = internalCluster().getInstance(TransportService.class, replicaNode); - ((MockTransportService) mockTransportService).addFailToSendNoConnectRule( - internalCluster().getInstance(TransportService.class, primaryNode), - singleton(IndexAction.NAME + "[r]") - ); - - logger.info("--> indexing into primary"); - // the replica shard should now be marked as failed because the replication operation will fail - resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "baz").get(); - // wait until the cluster reaches an exact yellow state, meaning replica has failed - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(client().admin().cluster().prepareHealth().get().getStatus(), equalTo(ClusterHealthStatus.YELLOW)); - } - }); - assertThat("document should still be indexed and available", - client().prepareGet(INDEX, "doc", resp.getId()).get().isExists(), equalTo(true)); - - state = getNodeClusterState(randomFrom(nodes.toArray(Strings.EMPTY_ARRAY))); - RoutingNodes rn = state.getRoutingNodes(); - logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}", - rn.shards(input -> true).size(), - rn.shardsWithState(UNASSIGNED).size(), - rn.shardsWithState(INITIALIZING).size(), - rn.shardsWithState(RELOCATING).size(), - rn.shardsWithState(STARTED).size()); - logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}", - rn.shardsWithState(UNASSIGNED), - rn.shardsWithState(INITIALIZING), -
rn.shardsWithState(RELOCATING), - rn.shardsWithState(STARTED)); - - assertThat("only a single shard is now active (replica should be failed and not reallocated)", - rn.shardsWithState(STARTED).size(), equalTo(1)); - } - - private ClusterState getNodeClusterState(String node) { - return internalCluster().client(node).admin().cluster().prepareState().setLocal(true).get().getState(); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 8226d18239a..a94ff589228 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -41,7 +41,6 @@ import org.hamcrest.CoreMatchers; import java.util.Collection; import java.util.List; import java.util.Map; -import java.lang.NumberFormatException; import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -256,85 +255,60 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .endObject() .bytes()); - try { + expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", -91).field("lon", 1.3).endObject() .endObject() - .bytes()); - fail(); - } catch (MapperParsingException e) { + .bytes())); - } - - try { + expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", 91).field("lon", 1.3).endObject() .endObject() - .bytes()); - fail(); - } catch (MapperParsingException e) { + .bytes())); - } - - try { + expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", 1.2).field("lon", -181).endObject() .endObject() - .bytes()); - fail(); - } catch (MapperParsingException e) { + .bytes())); - } - - try { + expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", 1.2).field("lon", 181).endObject() .endObject() - .bytes()); - fail(); - } catch (MapperParsingException e) { + .bytes())); - } - - try { + MapperParsingException e = expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", "-").field("lon", 1.3).endObject() .endObject() - .bytes()); - fail(); - } catch (MapperParsingException e) { - assertThat(e.getRootCause(), instanceOf(NumberFormatException.class)); - assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\"")); - } + .bytes())); + assertThat(e.getRootCause(), instanceOf(NumberFormatException.class)); + assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\"")); - try { + e = expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", 1.2).field("lon", "-").endObject() .endObject() - .bytes()); - fail(); - } catch (MapperParsingException e) { - assertThat(e.getRootCause(), instanceOf(NumberFormatException.class)); - 
assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\"")); - } + .bytes())); + assertThat(e.getRootCause(), instanceOf(NumberFormatException.class)); + assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\"")); - try { + e = expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", "-").field("lon", "-").endObject() .endObject() - .bytes()); - fail(); - } catch (MapperParsingException e) { - assertThat(e.getRootCause(), instanceOf(NumberFormatException.class)); - assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\"")); - } + .bytes())); + assertThat(e.getRootCause(), instanceOf(NumberFormatException.class)); + assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\"")); } public void testNoValidateLegacyLatLonValues() throws Exception { @@ -743,92 +717,84 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { } if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) { - try { - String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject() - .endObject().endObject().string(); - parser.parse("type", new CompressedXContent(normalizeMapping)); - } catch (MapperParsingException e) { - assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [geohash : true]"); - } + String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject().endObject().endObject() + .string(); + Exception e = expectThrows(MapperParsingException.class, () -> + parser.parse("type", new CompressedXContent(normalizeMapping))); + assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [geohash : true]"); } - try { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("point").field("type", "geo_point"); + { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("point").field("type", "geo_point"); if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) { xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true); } String validateMapping = xContentBuilder.field("validate", true).endObject().endObject().endObject().endObject().string(); - parser.parse("type", new CompressedXContent(validateMapping)); - fail("process completed successfully when " + MapperParsingException.class.getName() + " expected"); - } catch (MapperParsingException e) { + Exception e = expectThrows(MapperParsingException.class, () -> + parser.parse("type", new CompressedXContent(validateMapping))); assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate : true]"); } - try { + { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); if 
(version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) { xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true); } String validateMapping = xContentBuilder.field("validate_lat", true).endObject().endObject().endObject().endObject().string(); - parser.parse("type", new CompressedXContent(validateMapping)); - fail("process completed successfully when " + MapperParsingException.class.getName() + " expected"); - } catch (MapperParsingException e) { + Exception e = expectThrows(MapperParsingException.class, () -> + parser.parse("type", new CompressedXContent(validateMapping))); assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lat : true]"); } - try { + { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) { xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true); } String validateMapping = xContentBuilder.field("validate_lon", true).endObject().endObject().endObject().endObject().string(); - parser.parse("type", new CompressedXContent(validateMapping)); - fail("process completed successfully when " + MapperParsingException.class.getName() + " expected"); - } catch (MapperParsingException e) { + Exception e = expectThrows(MapperParsingException.class, () -> + parser.parse("type", new CompressedXContent(validateMapping))); assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lon : true]"); } // test deprecated normalize - try { + { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) { xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true); } String normalizeMapping = xContentBuilder.field("normalize", true).endObject().endObject().endObject().endObject().string(); - parser.parse("type", new CompressedXContent(normalizeMapping)); - fail("process completed successfully when " + MapperParsingException.class.getName() + " expected"); - } catch (MapperParsingException e) { + Exception e = expectThrows(MapperParsingException.class, () -> + parser.parse("type", new CompressedXContent(normalizeMapping))); assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize : true]"); } - try { + { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) { xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true); } String normalizeMapping = xContentBuilder.field("normalize_lat", true).endObject().endObject().endObject().endObject().string(); - parser.parse("type", new CompressedXContent(normalizeMapping)); - fail("process completed successfully when " + MapperParsingException.class.getName() + " expected"); - } catch (MapperParsingException e) { + Exception e = expectThrows(MapperParsingException.class, () -> + parser.parse("type", new CompressedXContent(normalizeMapping))); assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lat : true]"); } - try { + { XContentBuilder 
xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) { xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true); } String normalizeMapping = xContentBuilder.field("normalize_lon", true).endObject().endObject().endObject().endObject().string(); - parser.parse("type", new CompressedXContent(normalizeMapping)); - fail("process completed successfully when " + MapperParsingException.class.getName() + " expected"); - } catch (MapperParsingException e) { + Exception e = expectThrows(MapperParsingException.class, () -> + parser.parse("type", new CompressedXContent(normalizeMapping))); assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lon : true]"); } } @@ -844,20 +810,17 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false) .field("geohash", false).endObject().endObject().endObject().endObject().string(); - try { - mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); - fail(); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("mapper [point] has different [lat_lon]")); - assertThat(e.getMessage(), containsString("mapper [point] has different [geohash]")); - assertThat(e.getMessage(), containsString("mapper [point] has different [geohash_precision]")); - } + Exception e = expectThrows(IllegalArgumentException.class, () -> + mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false)); + assertThat(e.getMessage(), containsString("mapper [point] has different [lat_lon]")); + assertThat(e.getMessage(), containsString("mapper [point] has different [geohash]")); + assertThat(e.getMessage(), containsString("mapper [point] has different [geohash_precision]")); // correct mapping and ensure no failures - stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + String stage2MappingCorrect = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("geohash", true).endObject().endObject().endObject().endObject().string(); - mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(stage2MappingCorrect), MapperService.MergeReason.MAPPING_UPDATE, false); } public void testLegacyGeoHashSearch() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 89a42a884c6..ae306009f25 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -22,13 +22,8 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.DocumentMapper; -import 
org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.ObjectMapper.Dynamic; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; @@ -366,22 +361,16 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase { createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); // explicitly setting limit to 0 prevents nested fields - try { + Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded")); - } + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false)); + assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded")); // setting limit to 1 with 2 nested fields fails - try { + e = expectThrows(IllegalArgumentException.class, () -> createIndex("test3", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded")); - } + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false)); + assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded")); MapperService mapperService = createIndex("test4", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2) .build()).mapperService(); @@ -391,12 +380,9 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase { // adding new fields from different type is not ok String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type3").startObject("properties").startObject("nested3") .field("type", "nested").startObject("properties").endObject().endObject().endObject().endObject().endObject().string(); - try { - mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded")); - } + e = expectThrows(IllegalArgumentException.class, () -> + mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); + assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded")); // do not check nested fields limit if mapping is not updated createIndex("test5", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) diff 
--git a/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldMapperTests.java index 33f7db973d3..6b156fa36e1 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldMapperTests.java @@ -30,20 +30,11 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.joda.Joda; -import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.mapper.TimestampFieldMapper; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -57,7 +48,6 @@ import java.util.Collection; import java.util.LinkedHashMap; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -212,12 +202,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase { .field("default", (String) null) .endObject() .endObject().endObject(); - try { - createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string())); - fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null"); - } catch (TimestampParsingException e) { - assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); - } + TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService() + .documentMapperParser().parse("type", new CompressedXContent(mapping.string()))); + assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); } // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null] @@ -229,12 +216,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject(); - try { - createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string())); - fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null"); - } catch (TimestampParsingException e) { - assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); - } + TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> 
createIndex("test", BW_SETTINGS).mapperService() + .documentMapperParser().parse("type", new CompressedXContent(mapping.string()))); + assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); } // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null] @@ -247,12 +231,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject(); - try { - createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string())); - fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set with ignore_missing set to false"); - } catch (TimestampParsingException e) { - assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set with ignore_missing set to false")); - } + TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService() + .documentMapperParser().parse("type", new CompressedXContent(mapping.string()))); + assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set with ignore_missing set to false")); } // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null] diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java index 1af1b55a6ed..08b16d3400d 100644 --- a/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; -import static org.hamcrest.Matchers.is; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; +import static org.hamcrest.Matchers.is; public class GeoPointParsingTests extends ESTestCase { static double TOLERANCE = 1E-5; @@ -112,13 +112,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } public void testInvalidPointLatHashMix() throws IOException { @@ -130,12 +125,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); } public void testInvalidPointLonHashMix() throws IOException { @@ -147,12 +138,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - try { - 
GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); } public void testInvalidField() throws IOException { @@ -164,12 +151,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } private static XContentParser objectLatLon(double lat, double lon) throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index 59c0e4a3bfc..c09b2face7e 100644 --- a/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.search.geo; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; @@ -33,6 +31,8 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.test.ESTestCase; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; import java.io.IOException; @@ -439,12 +439,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("geohash", 1.0).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("geohash must be a string")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), containsString("geohash must be a string")); } public void testParseGeoPointLatNoLon() throws IOException { @@ -452,12 +448,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field [lon] missing")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field [lon] missing")); } public void 
testParseGeoPointLonNoLat() throws IOException { @@ -465,12 +457,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lon", lon).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field [lat] missing")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field [lat] missing")); } public void testParseGeoPointLonWrongType() throws IOException { @@ -478,12 +466,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", false).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("longitude must be a number")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("longitude must be a number")); } public void testParseGeoPointLatWrongType() throws IOException { @@ -491,12 +475,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", false).field("lon", lon).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("latitude must be a number")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("latitude must be a number")); } public void testParseGeoPointExtraField() throws IOException { @@ -505,12 +485,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("foo", true).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } public void testParseGeoPointLonLatGeoHash() throws IOException { @@ -521,12 +497,8 @@ public class GeoUtilsTests extends ESTestCase { .bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash")); } public void testParseGeoPointArrayTooManyValues() throws IOException { @@ -539,12 +511,8 
@@ public class GeoUtilsTests extends ESTestCase { while (parser.currentToken() != Token.START_ARRAY) { parser.nextToken(); } - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("only two values allowed")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("only two values allowed")); } public void testParseGeoPointArrayWrongType() throws IOException { @@ -555,12 +523,8 @@ public class GeoUtilsTests extends ESTestCase { while (parser.currentToken() != Token.START_ARRAY) { parser.nextToken(); } - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("numeric value expected")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("numeric value expected")); } public void testParseGeoPointInvalidType() throws IOException { @@ -569,12 +533,8 @@ public class GeoUtilsTests extends ESTestCase { while (parser.currentToken() != Token.VALUE_NUMBER) { parser.nextToken(); } - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("geo_point expected")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("geo_point expected")); } public void testPrefixTreeCellSizes() { diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 335cffba493..bd918457e21 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -470,8 +470,6 @@ public class IndexShardTests extends IndexShardTestCase { throw new RuntimeException(ex); } } - - ; }; thread[i].start(); } @@ -1172,6 +1170,7 @@ public class IndexShardTests extends IndexShardTestCase { throw new RuntimeException("boom"); } + @Override public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { return searcher; } diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 8b61a399a9b..bcb29b66148 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -64,9 +64,8 @@ public class ShardPathTests extends ESTestCase { assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); int id = randomIntBetween(1, 10); ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, indexUUID, AllocationId.newInitializing()), paths); - ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { + Exception e = expectThrows(IllegalStateException.class, () -> + ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings))); assertThat(e.getMessage(), containsString("more than one shard state found")); } } @@ -81,9 +80,8 @@ public class ShardPathTests extends ESTestCase { 
Path path = randomFrom(paths); int id = randomIntBetween(1, 10); ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), path); - ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { + Exception e = expectThrows(IllegalStateException.class, () -> + ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings))); assertThat(e.getMessage(), containsString("expected: foobar on shard path")); } } @@ -91,12 +89,8 @@ public class ShardPathTests extends ESTestCase { public void testIllegalCustomDataPath() { Index index = new Index("foo", "foo"); final Path path = createTempDir().resolve(index.getUUID()).resolve("0"); - try { - new ShardPath(true, path, path, new ShardId(index, 0)); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths")); - } + Exception e = expectThrows(IllegalArgumentException.class, () -> new ShardPath(true, path, path, new ShardId(index, 0))); + assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths")); } public void testValidCtor() { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 13dad84eb9b..2734fd78284 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -45,7 +45,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -444,21 +443,15 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { public void testAllMissingStrict() throws Exception { createIndex("test1"); - try { + expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("test2") .setQuery(matchAllQuery()) - .execute().actionGet(); - fail("Exception should have been thrown."); - } catch (IndexNotFoundException e) { - } + .execute().actionGet()); - try { + expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("test2","test3") .setQuery(matchAllQuery()) - .execute().actionGet(); - fail("Exception should have been thrown."); - } catch (IndexNotFoundException e) { - } + .execute().actionGet()); //you should still be able to run empty searches without things blowing up client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 4f97264af9f..6f9668c368e 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -70,22 +70,16 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testSimpleCloseMissingIndex() { Client client = 
client(); - try { - client.admin().indices().prepareClose("test1").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareClose("test1").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testSimpleOpenMissingIndex() { Client client = client(); - try { - client.admin().indices().prepareOpen("test1").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareOpen("test1").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testCloseOneMissingIndex() { @@ -93,12 +87,9 @@ public class OpenCloseIndexIT extends ESIntegTestCase { createIndex("test1"); ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - try { - client.admin().indices().prepareClose("test1", "test2").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareClose("test1", "test2").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testCloseOneMissingIndexIgnoreMissing() { @@ -117,12 +108,9 @@ public class OpenCloseIndexIT extends ESIntegTestCase { createIndex("test1"); ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - try { - client.admin().indices().prepareOpen("test1", "test2").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareOpen("test1", "test2").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testOpenOneMissingIndexIgnoreMissing() { @@ -204,42 +192,30 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testCloseNoIndex() { Client client = client(); - try { - client.admin().indices().prepareClose().execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareClose().execute().actionGet()); + assertThat(e.getMessage(), containsString("index is missing")); } public void testCloseNullIndex() { Client client = client(); - try { - client.admin().indices().prepareClose((String[])null).execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareClose((String[])null).execute().actionGet()); + assertThat(e.getMessage(), 
containsString("index is missing")); } public void testOpenNoIndex() { Client client = client(); - try { - client.admin().indices().prepareOpen().execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareOpen().execute().actionGet()); + assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenNullIndex() { Client client = client(); - try { - client.admin().indices().prepareOpen((String[])null).execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareOpen((String[])null).execute().actionGet()); + assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenAlreadyOpenedIndex() { diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 5a257cc64fd..8582ca0e02f 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -42,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.Index; @@ -77,9 +76,8 @@ import static org.hamcrest.Matchers.instanceOf; public class RareClusterStateIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); + protected boolean addMockZenPings() { + return false; } @Override @@ -173,9 +171,7 @@ public class RareClusterStateIT extends ESIntegTestCase { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932") public void testDeleteCreateInOneBulk() throws Exception { - internalCluster().startNodesAsync(2, Settings.builder() - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen") - .build()).get(); + internalCluster().startNodesAsync(2).get(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); prepareCreate("test").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).addMapping("type").get(); ensureGreen("test"); diff --git a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index e66eeb48766..4e43537099b 100644 --- a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -819,7 +819,7 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0)); 
assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d)); assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) 1332374400000L)); - assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) 1L)); + assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).fields().get("text_field").value(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).fields().get("keyword_field").value(), equalTo("foo")); } diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 1d54b412d6c..6ee6f56c536 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -535,13 +535,10 @@ public class SearchQueryIT extends ESIntegTestCase { searchResponse = client().prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get(); assertHitCount(searchResponse, 1L); - try { - client().prepareSearch().setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get(); - fail("expected SearchPhaseExecutionException (total failure)"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.toString(), containsString("unit [D] not supported for date math")); - } + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch() + .setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get()); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.toString(), containsString("unit [D] not supported for date math")); } // Issue #7880 @@ -776,12 +773,7 @@ public class SearchQueryIT extends ESIntegTestCase { searchResponse = client().prepareSearch().setQuery(matchQuery("double", "2")).get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("2")); - try { - client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - // number format exception - } + expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); } public void testMultiMatchQuery() throws Exception { @@ -1777,15 +1769,11 @@ public class SearchQueryIT extends ESIntegTestCase { refresh(); //has_child fails if executed on "simple" index - try { - client().prepareSearch("simple") - .setQuery(hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get(); - fail("Should have failed as has_child query can only be executed against parent-child types"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.shardFailures().length, greaterThan(0)); - for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { - assertThat(shardSearchFailure.reason(), containsString("no mapping found for type [child]")); - } + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, + () -> client().prepareSearch("simple").setQuery(hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get()); + assertThat(e.shardFailures().length, greaterThan(0)); + 
for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { + assertThat(shardSearchFailure.reason(), containsString("no mapping found for type [child]")); } //has_child doesn't get parsed for "simple" index @@ -1983,14 +1971,10 @@ public class SearchQueryIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); // When we use long values, it means we have ms since epoch UTC based so we don't apply any transformation - try { + Exception e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from(1388534400000L).to(1388537940999L).timeZone("+01:00")) - .get(); - fail("A Range Filter using ms since epoch with a TimeZone should raise a ParsingException"); - } catch (SearchPhaseExecutionException e) { - // We expect it - } + .get()); searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00")) @@ -2005,14 +1989,10 @@ public class SearchQueryIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getId(), is("4")); // A Range Filter on a numeric field with a TimeZone should raise an exception - try { + e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("num").from("0").to("4").timeZone("-01:00")) - .get(); - fail("A Range Filter on a numeric field with a TimeZone should raise a ParsingException"); - } catch (SearchPhaseExecutionException e) { - // We expect it - } + .get()); } public void testSearchEmptyDoc() { diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java index 7ed0c900151..bffa1326630 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java @@ -26,8 +26,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.DocValueFormat; -import org.junit.Rule; -import org.junit.rules.ExpectedException; import java.io.IOException; @@ -49,16 +47,12 @@ public class ScoreSortBuilderTests extends AbstractSortTestCase new ScoreSortBuilder().order(null)); + assertEquals("sort order cannot be null.", e.getMessage()); } /** diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java index 29deb6dd76d..e0329347dba 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java @@ -20,16 +20,11 @@ package org.elasticsearch.search.sort; import org.elasticsearch.test.ESTestCase; -import org.junit.Rule; -import org.junit.rules.ExpectedException; import java.util.Locale; public class SortModeTests extends ESTestCase { - @Rule - public ExpectedException exceptionRule = ExpectedException.none(); - public void testSortMode() { // we rely on these ordinals in serialization, so changing them breaks bwc. 
assertEquals(0, SortMode.MIN.ordinal()); @@ -50,16 +45,11 @@ public class SortModeTests extends ESTestCase { } } - public void testParseNull() { - exceptionRule.expect(NullPointerException.class); - exceptionRule.expectMessage("input string is null"); - SortMode.fromString(null); - } + public void testParsingFromStringExceptions() { + Exception e = expectThrows(NullPointerException.class, () -> SortMode.fromString(null)); + assertEquals("input string is null", e.getMessage()); - public void testIllegalArgument() { - exceptionRule.expect(IllegalArgumentException.class); - exceptionRule.expectMessage("Unknown SortMode [xyz]"); - SortMode.fromString("xyz"); + e = expectThrows(IllegalArgumentException.class, () -> SortMode.fromString("xyz")); + assertEquals("Unknown SortMode [xyz]", e.getMessage()); } - } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index fd57a1198e0..0e777cbd97a 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -44,7 +44,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.index.store.IndexStore; @@ -59,7 +58,6 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.admin.cluster.RestClusterStateAction; import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.snapshots.mockstore.MockRepository; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; @@ -93,13 +91,6 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - // TODO only restorePersistentSettingsTest needs this maybe factor out? 
- .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(MockRepository.Plugin.class); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 200ec6ac4b1..bf69f6016de 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -57,9 +57,9 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; @@ -693,7 +693,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.builder().put("location", repositoryLocation))); - prepareCreate("test-idx").setSettings(Settings.builder().put("index.allocation.max_retries", Integer.MAX_VALUE)).get(); + createIndex("test-idx"); ensureGreen(); logger.info("--> indexing some data"); diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 89a32c11828..0bc4974f285 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -33,19 +33,15 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.MasterNotDiscoveredException; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.transport.MockTcpTransportPlugin; -import org.elasticsearch.transport.Transport; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -54,7 +50,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Consumer; @@ -66,6 +61,7 @@ import java.util.stream.StreamSupport; import static java.util.stream.Collectors.toSet; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -195,22 +191,12 @@ public class TribeIT extends ESIntegTestCase { settings.put(Node.NODE_MASTER_SETTING.getKey(), true); settings.put(NetworkModule.HTTP_ENABLED.getKey(), false); settings.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME); - settings.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); doWithAllClusters(filter, c -> { String tribeSetting = "tribe." + c.getClusterName() + "."; settings.put(tribeSetting + ClusterName.CLUSTER_NAME_SETTING.getKey(), c.getClusterName()); settings.put(tribeSetting + DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "100ms"); settings.put(tribeSetting + NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME); - settings.put(tribeSetting + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); - - Set hosts = new HashSet<>(); - for (Transport transport : c.getInstances(Transport.class)) { - TransportAddress address = transport.boundAddress().publishAddress(); - hosts.add(address.getAddress() + ":" + address.getPort()); - } - settings.putArray(tribeSetting + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), - hosts.toArray(new String[hosts.size()])); }); return settings; @@ -497,7 +483,7 @@ public class TribeIT extends ESIntegTestCase { assertBusy(() -> { ClusterState state = client().admin().cluster().prepareState().setNodes(true).get().getState(); Set nodes = StreamSupport.stream(state.getNodes().spliterator(), false).map(DiscoveryNode::getName).collect(toSet()); - assertThat(nodes.containsAll(expectedNodes), is(true)); + assertThat(nodes, containsInAnyOrder(expectedNodes.toArray())); }); } diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json new file mode 100644 index 00000000000..3b6e373b556 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json @@ -0,0 +1,69 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": false + }, + "_meta": { + "version": "5.0.0-beta1" + }, + "dynamic_templates": [ + { + "fields": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "fields.*" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "input_type": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "norms": false, + "type": "text" + }, + "offset": { + "type": "long" + }, + "source": { + "ignore_above": 1024, + "type": "keyword" + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "filebeat-*" +} diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json new file mode 100644 index 00000000000..fc6406c457a --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json @@ -0,0 +1,45 @@ +{ + 
"template" : "logstash-*", + "settings" : { + "index.refresh_interval" : "5s" + }, + "mappings" : { + "_default_" : { + "_all" : {"enabled" : true, "norms" : false}, + "dynamic_templates" : [ { + "message_field" : { + "match" : "message", + "match_mapping_type" : "string", + "mapping" : { + "type" : "string", "index" : "analyzed", "norms" : false, + "fielddata" : { "format" : "disabled" } + } + } + }, { + "string_fields" : { + "match" : "*", + "match_mapping_type" : "string", + "mapping" : { + "type" : "text", "norms" : false, + "fields" : { + "keyword" : { "type": "keyword" } + } + } + } + } ], + "properties" : { + "@timestamp": { "type": "date", "include_in_all": false }, + "@version": { "type": "keyword", "include_in_all": false }, + "geoip" : { + "dynamic": true, + "properties" : { + "ip": { "type": "ip" }, + "location" : { "type" : "geo_point" }, + "latitude" : { "type" : "half_float" }, + "longitude" : { "type" : "half_float" } + } + } + } + } + } +} diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json new file mode 100644 index 00000000000..57786288db6 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json @@ -0,0 +1,2658 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": false + }, + "_meta": { + "version": "5.0.0-beta1" + }, + "dynamic_templates": [ + { + "fields": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "fields.*" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "apache": { + "properties": { + "status": { + "properties": { + "bytes_per_request": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "bytes_per_sec": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "connections": { + "properties": { + "async": { + "properties": { + "closing": { + "type": "long" + }, + "keep_alive": { + "type": "long" + }, + "writing": { + "type": "long" + } + } + }, + "total": { + "type": "long" + } + } + }, + "cpu": { + "properties": { + "children_system": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "children_user": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "load": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "system": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "user": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "load": { + "properties": { + "1": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "15": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "5": { + "scaling_factor": 100, + "type": "scaled_float" + } + } + }, + "requests_per_sec": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "scoreboard": { + "properties": { + "closing_connection": { + "type": "long" + }, + "dns_lookup": { + "type": "long" + }, + "gracefully_finishing": { + "type": "long" + }, + "idle_cleanup": { + "type": "long" + }, + "keepalive": { + "type": "long" + }, + "logging": { + "type": "long" + }, + "open_slot": { + "type": "long" + }, + "reading_request": { + "type": "long" + }, + "sending_reply": { + "type": "long" + }, + "starting_up": { + "type": "long" + }, + "total": { + "type": "long" + }, + "waiting_for_connection": { + "type": "long" + } + } + }, + "total_accesses": { + "type": 
"long" + }, + "total_kbytes": { + "type": "long" + }, + "uptime": { + "properties": { + "server_uptime": { + "type": "long" + }, + "uptime": { + "type": "long" + } + } + }, + "workers": { + "properties": { + "busy": { + "type": "long" + }, + "idle": { + "type": "long" + } + } + } + } + } + } + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "haproxy": { + "properties": { + "info": { + "properties": { + "compress": { + "properties": { + "bps": { + "properties": { + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "rate_limit": { + "type": "long" + } + } + } + } + }, + "conn": { + "properties": { + "rate": { + "properties": { + "limit": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "cum": { + "properties": { + "conns": { + "type": "long" + }, + "req": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + } + } + }, + "curr": { + "properties": { + "conns": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + } + } + }, + "idle_pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "max": { + "properties": { + "conn": { + "properties": { + "rate": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "hard_conn": { + "type": "long" + }, + "pipes": { + "type": "long" + }, + "sess_rate": { + "type": "long" + }, + "sock": { + "type": "long" + }, + "ssl": { + "properties": { + "conns": { + "type": "long" + }, + "rate": { + "type": "long" + } + } + }, + "zlib_mem_usage": { + "type": "long" + } + } + }, + "mem_max_bytes": { + "type": "long" + }, + "nb_proc": { + "type": "long" + }, + "pid": { + "type": "long" + }, + "pipes": { + "properties": { + "free": { + "type": "long" + }, + "used": { + "type": "long" + } + } + }, + "process_num": { + "type": "long" + }, + "run_queue": { + "type": "long" + }, + "sess": { + "properties": { + "rate": { + "properties": { + "limit": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "ssl": { + "properties": { + "backend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + } + } + }, + "cache_misses": { + "type": "long" + }, + "cached_lookups": { + "type": "long" + }, + "frontend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + }, + "session_reuse_pct": { + "type": "long" + } + } + }, + "rate": { + "properties": { + "limit": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "tasks": { + "type": "long" + }, + "ulimit_n": { + "type": "long" + }, + "uptime_sec": { + "type": "long" + }, + "zlib_mem_usage": { + "type": "long" + } + } + }, + "stat": { + "properties": { + "act": { + "type": "long" + }, + "bck": { + "type": "long" + }, + "bin": { + "type": "long" + }, + "bout": { + "type": "long" + }, + "check": { + "properties": { + "code": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "chkdown": { + "type": "long" + }, + "chkfail": { + "type": "long" + }, + "cli_abrt": { + "type": "long" + }, + "comp": { + "properties": { + "byp": { + "type": "long" + }, + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "rsp": { + "type": "long" + } + } + }, + "component_type": { + "type": "long" + }, + "ctime": { + "type": "long" + }, + "downtime": { + "type": "long" + }, + "dreq": { + "type": "long" + }, + "dresp": { + "type": "long" 
+ }, + "econ": { + "type": "long" + }, + "ereq": { + "type": "long" + }, + "eresp": { + "type": "long" + }, + "hanafail": { + "type": "long" + }, + "hrsp": { + "properties": { + "1xx": { + "type": "long" + }, + "2xx": { + "type": "long" + }, + "3xx": { + "type": "long" + }, + "4xx": { + "type": "long" + }, + "5xx": { + "type": "long" + }, + "other": { + "type": "long" + } + } + }, + "iid": { + "type": "long" + }, + "last": { + "properties": { + "agt": { + "ignore_above": 1024, + "type": "keyword" + }, + "chk": { + "ignore_above": 1024, + "type": "keyword" + }, + "sess": { + "type": "long" + } + } + }, + "lastchg": { + "type": "long" + }, + "lbtot": { + "type": "long" + }, + "pid": { + "type": "long" + }, + "pxname": { + "ignore_above": 1024, + "type": "keyword" + }, + "qcur": { + "type": "long" + }, + "qlimit": { + "type": "long" + }, + "qmax": { + "type": "long" + }, + "qtime": { + "type": "long" + }, + "rate": { + "properties": { + "lim": { + "type": "long" + }, + "max": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "req": { + "properties": { + "rate": { + "properties": { + "max": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "tot": { + "type": "long" + } + } + }, + "rtime": { + "type": "long" + }, + "scur": { + "type": "long" + }, + "sid": { + "type": "long" + }, + "slim": { + "type": "long" + }, + "smax": { + "type": "long" + }, + "srv_abrt": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "stot": { + "ignore_above": 1024, + "type": "keyword" + }, + "svname": { + "ignore_above": 1024, + "type": "keyword" + }, + "throttle": { + "type": "long" + }, + "tracked": { + "type": "long" + }, + "ttime": { + "type": "long" + }, + "weight": { + "type": "long" + }, + "wredis": { + "type": "long" + }, + "wretr": { + "type": "long" + } + } + } + } + }, + "metricset": { + "properties": { + "host": { + "ignore_above": 1024, + "type": "keyword" + }, + "module": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "rtt": { + "type": "long" + } + } + }, + "mongodb": { + "properties": { + "status": { + "properties": { + "asserts": { + "properties": { + "msg": { + "type": "long" + }, + "regular": { + "type": "long" + }, + "rollovers": { + "type": "long" + }, + "user": { + "type": "long" + }, + "warning": { + "type": "long" + } + } + }, + "background_flushing": { + "properties": { + "average": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "flushes": { + "type": "long" + }, + "last": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "last_finished": { + "type": "date" + }, + "total": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "connections": { + "properties": { + "available": { + "type": "long" + }, + "current": { + "type": "long" + }, + "total_created": { + "type": "long" + } + } + }, + "extra_info": { + "properties": { + "heap_usage": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "page_faults": { + "type": "long" + } + } + }, + "journaling": { + "properties": { + "commits": { + "type": "long" + }, + "commits_in_write_lock": { + "type": "long" + }, + "compression": { + "type": "long" + }, + "early_commits": { + "type": "long" + }, + "journaled": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "times": { + "properties": { + "commits": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "commits_in_write_lock": { + "properties": { + "ms": { + "type": "long" + } + } + }, 
+ "dt": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "prep_log_buffer": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "remap_private_view": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "write_to_data_files": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "write_to_journal": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "write_to_data_files": { + "properties": { + "mb": { + "type": "long" + } + } + } + } + }, + "local_time": { + "type": "date" + }, + "memory": { + "properties": { + "bits": { + "type": "long" + }, + "mapped": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "mapped_with_journal": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "resident": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "virtual": { + "properties": { + "mb": { + "type": "long" + } + } + } + } + }, + "network": { + "properties": { + "in": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "out": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "requests": { + "type": "long" + } + } + }, + "opcounters": { + "properties": { + "command": { + "type": "long" + }, + "delete": { + "type": "long" + }, + "getmore": { + "type": "long" + }, + "insert": { + "type": "long" + }, + "query": { + "type": "long" + }, + "update": { + "type": "long" + } + } + }, + "opcounters_replicated": { + "properties": { + "command": { + "type": "long" + }, + "delete": { + "type": "long" + }, + "getmore": { + "type": "long" + }, + "insert": { + "type": "long" + }, + "query": { + "type": "long" + }, + "update": { + "type": "long" + } + } + }, + "storage_engine": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "uptime": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + }, + "write_backs_queued": { + "type": "boolean" + } + } + } + } + }, + "mysql": { + "properties": { + "status": { + "properties": { + "aborted": { + "properties": { + "clients": { + "type": "long" + }, + "connects": { + "type": "long" + } + } + }, + "binlog": { + "properties": { + "cache": { + "properties": { + "disk_use": { + "type": "long" + }, + "use": { + "type": "long" + } + } + } + } + }, + "bytes": { + "properties": { + "received": { + "type": "long" + }, + "sent": { + "type": "long" + } + } + }, + "connections": { + "type": "long" + }, + "created": { + "properties": { + "tmp": { + "properties": { + "disk_tables": { + "type": "long" + }, + "files": { + "type": "long" + }, + "tables": { + "type": "long" + } + } + } + } + }, + "delayed": { + "properties": { + "errors": { + "type": "long" + }, + "insert_threads": { + "type": "long" + }, + "writes": { + "type": "long" + } + } + }, + "flush_commands": { + "type": "long" + }, + "max_used_connections": { + "type": "long" + }, + "open": { + "properties": { + "files": { + "type": "long" + }, + "streams": { + "type": "long" + }, + "tables": { + "type": "long" + } + } + }, + "opened_tables": { + "type": "long" + }, + "threads": { + "properties": { + "cached": { + "type": "long" + }, + "connected": { + "type": "long" + }, + "created": { + "type": "long" + }, + "running": { + "type": "long" + } + } + } + } + } + } + }, + "nginx": { + "properties": { + "stubstatus": { + "properties": { + "accepts": { + "type": "long" + }, + "active": { + "type": "long" + }, + "current": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "handled": { + "type": "long" + }, 
+ "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "reading": { + "type": "long" + }, + "requests": { + "type": "long" + }, + "waiting": { + "type": "long" + }, + "writing": { + "type": "long" + } + } + } + } + }, + "postgresql": { + "properties": { + "activity": { + "properties": { + "application_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "backend_start": { + "type": "date" + }, + "client": { + "properties": { + "address": { + "ignore_above": 1024, + "type": "keyword" + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "port": { + "type": "long" + } + } + }, + "database": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "oid": { + "type": "long" + } + } + }, + "pid": { + "type": "long" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + }, + "query_start": { + "type": "date" + }, + "state": { + "ignore_above": 1024, + "type": "keyword" + }, + "state_change": { + "type": "date" + }, + "transaction_start": { + "type": "date" + }, + "user": { + "properties": { + "id": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "waiting": { + "type": "boolean" + } + } + }, + "bgwriter": { + "properties": { + "buffers": { + "properties": { + "allocated": { + "type": "long" + }, + "backend": { + "type": "long" + }, + "backend_fsync": { + "type": "long" + }, + "checkpoints": { + "type": "long" + }, + "clean": { + "type": "long" + }, + "clean_full": { + "type": "long" + } + } + }, + "checkpoints": { + "properties": { + "requested": { + "type": "long" + }, + "scheduled": { + "type": "long" + }, + "times": { + "properties": { + "sync": { + "properties": { + "ms": { + "type": "float" + } + } + }, + "write": { + "properties": { + "ms": { + "type": "float" + } + } + } + } + } + } + }, + "stats_reset": { + "type": "date" + } + } + }, + "database": { + "properties": { + "blocks": { + "properties": { + "hit": { + "type": "long" + }, + "read": { + "type": "long" + }, + "time": { + "properties": { + "read": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "write": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + } + } + }, + "conflicts": { + "type": "long" + }, + "deadlocks": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "number_of_backends": { + "type": "long" + }, + "oid": { + "type": "long" + }, + "rows": { + "properties": { + "deleted": { + "type": "long" + }, + "fetched": { + "type": "long" + }, + "inserted": { + "type": "long" + }, + "returned": { + "type": "long" + }, + "updated": { + "type": "long" + } + } + }, + "stats_reset": { + "type": "date" + }, + "temporary": { + "properties": { + "bytes": { + "type": "long" + }, + "files": { + "type": "long" + } + } + }, + "transactions": { + "properties": { + "commit": { + "type": "long" + }, + "rollback": { + "type": "long" + } + } + } + } + } + } + }, + "redis": { + "properties": { + "info": { + "properties": { + "clients": { + "properties": { + "biggest_input_buf": { + "type": "long" + }, + "blocked": { + "type": "long" + }, + "connected": { + "type": "long" + }, + "longest_output_list": { + "type": "long" + } + } + }, + "cluster": { + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "cpu": { + "properties": { + "used": { + "properties": { + "sys": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "sys_children": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "user": { + "scaling_factor": 1000, + "type": 
"scaled_float" + }, + "user_children": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "memory": { + "properties": { + "allocator": { + "ignore_above": 1024, + "type": "keyword" + }, + "used": { + "properties": { + "lua": { + "type": "long" + }, + "peak": { + "type": "long" + }, + "rss": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "persistence": { + "properties": { + "aof": { + "properties": { + "bgrewrite": { + "properties": { + "last_status": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "enabled": { + "type": "boolean" + }, + "rewrite": { + "properties": { + "current_time": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "in_progress": { + "type": "boolean" + }, + "last_time": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "scheduled": { + "type": "boolean" + } + } + }, + "write": { + "properties": { + "last_status": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "loading": { + "type": "boolean" + }, + "rdb": { + "properties": { + "bgsave": { + "properties": { + "current_time": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "in_progress": { + "type": "boolean" + }, + "last_status": { + "ignore_above": 1024, + "type": "keyword" + }, + "last_time": { + "properties": { + "sec": { + "type": "long" + } + } + } + } + }, + "last_save": { + "properties": { + "changes_since": { + "type": "long" + }, + "time": { + "type": "long" + } + } + } + } + } + } + }, + "replication": { + "properties": { + "backlog": { + "properties": { + "active": { + "type": "long" + }, + "first_byte_offset": { + "type": "long" + }, + "histlen": { + "type": "long" + }, + "size": { + "type": "long" + } + } + }, + "connected_slaves": { + "type": "long" + }, + "master_offset": { + "type": "long" + }, + "role": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "server": { + "properties": { + "arch_bits": { + "ignore_above": 1024, + "type": "keyword" + }, + "build_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "config_file": { + "ignore_above": 1024, + "type": "keyword" + }, + "gcc_version": { + "ignore_above": 1024, + "type": "keyword" + }, + "git_dirty": { + "ignore_above": 1024, + "type": "keyword" + }, + "git_sha1": { + "ignore_above": 1024, + "type": "keyword" + }, + "hz": { + "type": "long" + }, + "lru_clock": { + "type": "long" + }, + "mode": { + "ignore_above": 1024, + "type": "keyword" + }, + "multiplexing_api": { + "ignore_above": 1024, + "type": "keyword" + }, + "os": { + "ignore_above": 1024, + "type": "keyword" + }, + "process_id": { + "type": "long" + }, + "run_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "tcp_port": { + "type": "long" + }, + "uptime": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "stats": { + "properties": { + "commands_processed": { + "type": "long" + }, + "connections": { + "properties": { + "received": { + "type": "long" + }, + "rejected": { + "type": "long" + } + } + }, + "instantaneous": { + "properties": { + "input_kbps": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ops_per_sec": { + "type": "long" + }, + "output_kbps": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + }, + "keys": { + "properties": { + "evicted": { + "type": "long" + }, + "expired": { + "type": "long" + } + } + }, + "keyspace": { + "properties": { + "hits": { + "type": "long" + }, + "misses": { + "type": "long" + } + } + }, + "latest_fork_usec": { + "type": "long" + 
}, + "migrate_cached_sockets": { + "type": "long" + }, + "net": { + "properties": { + "input": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "output": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "pubsub": { + "properties": { + "channels": { + "type": "long" + }, + "patterns": { + "type": "long" + } + } + }, + "sync": { + "properties": { + "full": { + "type": "long" + }, + "partial": { + "properties": { + "err": { + "type": "long" + }, + "ok": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "keyspace": { + "properties": { + "avg_ttl": { + "type": "long" + }, + "expires": { + "type": "long" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "keys": { + "type": "long" + } + } + } + } + }, + "system": { + "properties": { + "core": { + "properties": { + "id": { + "type": "long" + }, + "idle": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "iowait": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "irq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "nice": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "softirq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "steal": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "system": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "user": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + } + } + }, + "cpu": { + "properties": { + "idle": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "iowait": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "irq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "nice": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "softirq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "steal": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "system": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "user": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + } + } + }, + "diskio": { + "properties": { + "io": { + "properties": { + "time": { + "type": "long" + } + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "read": { + "properties": { + "bytes": { + "type": "long" + }, + "count": { + "type": "long" + }, + "time": { + "type": "long" + } + } + }, + "serial_number": { + "ignore_above": 1024, + "type": "keyword" + }, + "write": { + 
"properties": { + "bytes": { + "type": "long" + }, + "count": { + "type": "long" + }, + "time": { + "type": "long" + } + } + } + } + }, + "filesystem": { + "properties": { + "available": { + "type": "long" + }, + "device_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "files": { + "type": "long" + }, + "free": { + "type": "long" + }, + "free_files": { + "type": "long" + }, + "mount_point": { + "ignore_above": 1024, + "type": "keyword" + }, + "total": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "fsstat": { + "properties": { + "count": { + "type": "long" + }, + "total_files": { + "type": "long" + }, + "total_size": { + "properties": { + "free": { + "type": "long" + }, + "total": { + "type": "long" + }, + "used": { + "type": "long" + } + } + } + } + }, + "load": { + "properties": { + "1": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "15": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "5": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "norm": { + "properties": { + "1": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "15": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "5": { + "scaling_factor": 100, + "type": "scaled_float" + } + } + } + } + }, + "memory": { + "properties": { + "actual": { + "properties": { + "free": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "free": { + "type": "long" + }, + "swap": { + "properties": { + "free": { + "type": "long" + }, + "total": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "total": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "network": { + "properties": { + "in": { + "properties": { + "bytes": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "errors": { + "type": "long" + }, + "packets": { + "type": "long" + } + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "out": { + "properties": { + "bytes": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "errors": { + "type": "long" + }, + "packets": { + "type": "long" + } + } + } + } + }, + "process": { + "properties": { + "cgroup": { + "properties": { + "blkio": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "total": { + "properties": { + "bytes": { + "type": "long" + }, + "ios": { + "type": "long" + } + } + } + } + }, + "cpu": { + "properties": { + "cfs": { + "properties": { + "period": { + "properties": { + "us": { + "type": "long" + } + } + }, + "quota": { + "properties": { + "us": { + "type": "long" + } + } + }, + "shares": { + "type": "long" + } + } + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "rt": { + "properties": { + "period": { + "properties": { + "us": { + "type": "long" + } + } + }, + "runtime": { + "properties": { + "us": { + "type": "long" + } + } + } + } + }, + "stats": { + "properties": { + "periods": { + "type": "long" + }, + "throttled": { + "properties": { + "ns": { + "type": "long" + }, + 
"periods": { + "type": "long" + } + } + } + } + } + } + }, + "cpuacct": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "system": { + "properties": { + "ns": { + "type": "long" + } + } + }, + "user": { + "properties": { + "ns": { + "type": "long" + } + } + } + } + }, + "total": { + "properties": { + "ns": { + "type": "long" + } + } + } + } + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "memory": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "kmem": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "kmem_tcp": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "mem": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "memsw": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "active_anon": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "active_file": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "cache": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "hierarchical_memory_limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "hierarchical_memsw_limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "inactive_anon": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "inactive_file": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "major_page_faults": { + "type": "long" + }, + "mapped_file": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "page_faults": { + "type": "long" + }, + "pages_in": { + "type": "long" + }, + "pages_out": { + "type": "long" + }, + "rss": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "rss_huge": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "swap": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "unevictable": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "cmdline": { + "ignore_above": 1024, + "type": "keyword" + }, + "cpu": { + "properties": { + "start_time": { + "type": "date" + }, + "system": { + "type": "long" + }, + "total": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "user": { + "type": "long" + } + } + }, + "fd": { + "properties": { + "limit": { + "properties": { + "hard": { + "type": 
"long" + }, + "soft": { + "type": "long" + } + } + }, + "open": { + "type": "long" + } + } + }, + "memory": { + "properties": { + "rss": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + }, + "share": { + "type": "long" + }, + "size": { + "type": "long" + } + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "pgid": { + "type": "long" + }, + "pid": { + "type": "long" + }, + "ppid": { + "type": "long" + }, + "state": { + "ignore_above": 1024, + "type": "keyword" + }, + "username": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "zookeeper": { + "properties": { + "mntr": { + "properties": { + "approximate_data_size": { + "type": "long" + }, + "ephemerals_count": { + "type": "long" + }, + "followers": { + "type": "long" + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "latency": { + "properties": { + "avg": { + "type": "long" + }, + "max": { + "type": "long" + }, + "min": { + "type": "long" + } + } + }, + "max_file_descriptor_count": { + "type": "long" + }, + "num_alive_connections": { + "type": "long" + }, + "open_file_descriptor_count": { + "type": "long" + }, + "outstanding_requests": { + "type": "long" + }, + "packets": { + "properties": { + "received": { + "type": "long" + }, + "sent": { + "type": "long" + } + } + }, + "pending_syncs": { + "type": "long" + }, + "server_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "synced_followers": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + }, + "watch_count": { + "type": "long" + }, + "znode_count": { + "type": "long" + } + } + } + } + } + } + } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "metricbeat-*" +} diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json new file mode 100644 index 00000000000..22b889176aa --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json @@ -0,0 +1,1360 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": false + }, + "_meta": { + "version": "5.0.0-beta1" + }, + "dynamic_templates": [ + { + "fields": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "fields.*" + } + }, + { + "amqp.headers": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "amqp.headers.*" + } + }, + { + "cassandra.response.supported": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "cassandra.response.supported.*" + } + }, + { + "http.request.headers": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "http.request.headers.*" + } + }, + { + "http.response.headers": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "http.response.headers.*" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "amqp": { + "properties": { + "app-id": { + "ignore_above": 1024, + "type": "keyword" + }, + "auto-delete": { + "type": "boolean" + 
}, + "class-id": { + "type": "long" + }, + "consumer-count": { + "type": "long" + }, + "consumer-tag": { + "ignore_above": 1024, + "type": "keyword" + }, + "content-encoding": { + "ignore_above": 1024, + "type": "keyword" + }, + "content-type": { + "ignore_above": 1024, + "type": "keyword" + }, + "correlation-id": { + "ignore_above": 1024, + "type": "keyword" + }, + "delivery-mode": { + "ignore_above": 1024, + "type": "keyword" + }, + "delivery-tag": { + "type": "long" + }, + "durable": { + "type": "boolean" + }, + "exchange": { + "ignore_above": 1024, + "type": "keyword" + }, + "exchange-type": { + "ignore_above": 1024, + "type": "keyword" + }, + "exclusive": { + "type": "boolean" + }, + "expiration": { + "ignore_above": 1024, + "type": "keyword" + }, + "if-empty": { + "type": "boolean" + }, + "if-unused": { + "type": "boolean" + }, + "immediate": { + "type": "boolean" + }, + "mandatory": { + "type": "boolean" + }, + "message-count": { + "type": "long" + }, + "message-id": { + "ignore_above": 1024, + "type": "keyword" + }, + "method-id": { + "type": "long" + }, + "multiple": { + "type": "boolean" + }, + "no-ack": { + "type": "boolean" + }, + "no-local": { + "type": "boolean" + }, + "no-wait": { + "type": "boolean" + }, + "passive": { + "type": "boolean" + }, + "priority": { + "type": "long" + }, + "queue": { + "ignore_above": 1024, + "type": "keyword" + }, + "redelivered": { + "type": "boolean" + }, + "reply-code": { + "type": "long" + }, + "reply-text": { + "ignore_above": 1024, + "type": "keyword" + }, + "reply-to": { + "ignore_above": 1024, + "type": "keyword" + }, + "routing-key": { + "ignore_above": 1024, + "type": "keyword" + }, + "timestamp": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "user-id": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "bytes_in": { + "type": "long" + }, + "bytes_out": { + "type": "long" + }, + "cassandra": { + "properties": { + "request": { + "properties": { + "headers": { + "properties": { + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "length": { + "type": "long" + }, + "op": { + "ignore_above": 1024, + "type": "keyword" + }, + "stream": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "type": "long" + } + } + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "response": { + "properties": { + "authentication": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "error": { + "properties": { + "code": { + "type": "long" + }, + "details": { + "properties": { + "alive": { + "type": "long" + }, + "arg_types": { + "ignore_above": 1024, + "type": "keyword" + }, + "blockfor": { + "type": "long" + }, + "data_present": { + "type": "boolean" + }, + "function": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "num_failures": { + "ignore_above": 1024, + "type": "keyword" + }, + "read_consistency": { + "ignore_above": 1024, + "type": "keyword" + }, + "received": { + "type": "long" + }, + "required": { + "type": "long" + }, + "stmt_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + }, + "write_type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "msg": { + "ignore_above": 1024, + 
"type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "event": { + "properties": { + "change": { + "ignore_above": 1024, + "type": "keyword" + }, + "host": { + "ignore_above": 1024, + "type": "keyword" + }, + "schema_change": { + "properties": { + "args": { + "ignore_above": 1024, + "type": "keyword" + }, + "change": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "object": { + "ignore_above": 1024, + "type": "keyword" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + }, + "target": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "headers": { + "properties": { + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "length": { + "type": "long" + }, + "op": { + "ignore_above": 1024, + "type": "keyword" + }, + "stream": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "type": "long" + } + } + }, + "result": { + "properties": { + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "prepared": { + "properties": { + "prepared_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "req_meta": { + "properties": { + "col_count": { + "type": "long" + }, + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "paging_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "pkey_columns": { + "type": "long" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "resp_meta": { + "properties": { + "col_count": { + "type": "long" + }, + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "paging_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "pkey_columns": { + "type": "long" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "rows": { + "properties": { + "meta": { + "properties": { + "col_count": { + "type": "long" + }, + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "paging_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "pkey_columns": { + "type": "long" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "num_rows": { + "type": "long" + } + } + }, + "schema_change": { + "properties": { + "args": { + "ignore_above": 1024, + "type": "keyword" + }, + "change": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "object": { + "ignore_above": 1024, + "type": "keyword" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + }, + "target": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "warnings": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "client_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "client_location": { + "type": "geo_point" + }, + "client_port": { + "ignore_above": 1024, + "type": "keyword" + }, + "client_proc": { + "ignore_above": 1024, + "type": "keyword" + }, + "client_server": { + "ignore_above": 1024, + "type": "keyword" + }, + "client_service": { + "ignore_above": 1024, + "type": "keyword" + }, + 
"connection_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "connecttime": { + "type": "long" + }, + "cpu_time": { + "type": "long" + }, + "dest": { + "properties": { + "ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip_location": { + "type": "geo_point" + }, + "ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + "ipv6_location": { + "type": "geo_point" + }, + "mac": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip_location": { + "type": "geo_point" + }, + "outer_ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ipv6_location": { + "type": "geo_point" + }, + "port": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "net_bytes_total": { + "type": "long" + }, + "net_packets_total": { + "type": "long" + } + } + } + } + }, + "direction": { + "ignore_above": 1024, + "type": "keyword" + }, + "dns": { + "properties": { + "additionals": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "data": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "ttl": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "additionals_count": { + "type": "long" + }, + "answers": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "data": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "ttl": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "answers_count": { + "type": "long" + }, + "authorities": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "authorities_count": { + "type": "long" + }, + "flags": { + "properties": { + "authentic_data": { + "type": "boolean" + }, + "authoritative": { + "type": "boolean" + }, + "checking_disabled": { + "type": "boolean" + }, + "recursion_available": { + "type": "boolean" + }, + "recursion_desired": { + "type": "boolean" + }, + "truncated_response": { + "type": "boolean" + } + } + }, + "id": { + "type": "long" + }, + "op_code": { + "ignore_above": 1024, + "type": "keyword" + }, + "opt": { + "properties": { + "do": { + "type": "boolean" + }, + "ext_rcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "udp_size": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "question": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "etld_plus_one": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "response_code": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "dnstime": { + "type": "long" + }, + "domloadtime": { + "type": "long" + }, + "final": { + "ignore_above": 1024, + "type": "keyword" + }, + "flow_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "http": { + "properties": { + "request": { + "properties": { + "body": { + "norms": false, + "type": "text" + }, + "params": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "response": { + "properties": { + "body": { + "ignore_above": 1024, + "type": "keyword" + }, + "code": { + "ignore_above": 
1024, + "type": "keyword" + }, + "phrase": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "icmp": { + "properties": { + "request": { + "properties": { + "code": { + "type": "long" + }, + "message": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "type": "long" + } + } + }, + "response": { + "properties": { + "code": { + "type": "long" + }, + "message": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "type": "long" + } + } + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "icmp_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "last_time": { + "type": "date" + }, + "loadtime": { + "type": "long" + }, + "memcache": { + "properties": { + "protocol_type": { + "ignore_above": 1024, + "type": "keyword" + }, + "request": { + "properties": { + "automove": { + "ignore_above": 1024, + "type": "keyword" + }, + "bytes": { + "type": "long" + }, + "cas_unique": { + "type": "long" + }, + "command": { + "ignore_above": 1024, + "type": "keyword" + }, + "count_values": { + "type": "long" + }, + "delta": { + "type": "long" + }, + "dest_class": { + "type": "long" + }, + "exptime": { + "type": "long" + }, + "flags": { + "type": "long" + }, + "initial": { + "type": "long" + }, + "line": { + "ignore_above": 1024, + "type": "keyword" + }, + "noreply": { + "type": "boolean" + }, + "opaque": { + "type": "long" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "opcode_value": { + "type": "long" + }, + "quiet": { + "type": "boolean" + }, + "raw_args": { + "ignore_above": 1024, + "type": "keyword" + }, + "sleep_us": { + "type": "long" + }, + "source_class": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "vbucket": { + "type": "long" + }, + "verbosity": { + "type": "long" + } + } + }, + "response": { + "properties": { + "bytes": { + "type": "long" + }, + "cas_unique": { + "type": "long" + }, + "command": { + "ignore_above": 1024, + "type": "keyword" + }, + "count_values": { + "type": "long" + }, + "error_msg": { + "ignore_above": 1024, + "type": "keyword" + }, + "flags": { + "type": "long" + }, + "opaque": { + "type": "long" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "opcode_value": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "status_code": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "value": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "method": { + "ignore_above": 1024, + "type": "keyword" + }, + "mongodb": { + "properties": { + "cursorId": { + "ignore_above": 1024, + "type": "keyword" + }, + "error": { + "ignore_above": 1024, + "type": "keyword" + }, + "fullCollectionName": { + "ignore_above": 1024, + "type": "keyword" + }, + "numberReturned": { + "type": "long" + }, + "numberToReturn": { + "type": "long" + }, + "numberToSkip": { + "type": "long" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + }, + "returnFieldsSelector": { + "ignore_above": 1024, + "type": "keyword" + }, + "selector": { + "ignore_above": 1024, + "type": "keyword" + }, + "startingFrom": { + "ignore_above": 1024, + "type": "keyword" + }, + "update": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "mysql": { + "properties": { + "affected_rows": { + "type": "long" + }, + "error_code": { + "type": "long" + }, + "error_message": { + 
"ignore_above": 1024, + "type": "keyword" + }, + "insert_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "iserror": { + "type": "boolean" + }, + "num_fields": { + "ignore_above": 1024, + "type": "keyword" + }, + "num_rows": { + "ignore_above": 1024, + "type": "keyword" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "nfs": { + "properties": { + "minor_version": { + "type": "long" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "tag": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "type": "long" + } + } + }, + "notes": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_vlan": { + "ignore_above": 1024, + "type": "keyword" + }, + "params": { + "norms": false, + "type": "text" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "pgsql": { + "properties": { + "error_code": { + "type": "long" + }, + "error_message": { + "ignore_above": 1024, + "type": "keyword" + }, + "error_severity": { + "ignore_above": 1024, + "type": "keyword" + }, + "iserror": { + "type": "boolean" + }, + "num_fields": { + "ignore_above": 1024, + "type": "keyword" + }, + "num_rows": { + "ignore_above": 1024, + "type": "keyword" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "port": { + "ignore_above": 1024, + "type": "keyword" + }, + "proc": { + "ignore_above": 1024, + "type": "keyword" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + }, + "real_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "redis": { + "properties": { + "error": { + "ignore_above": 1024, + "type": "keyword" + }, + "return_value": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "release": { + "ignore_above": 1024, + "type": "keyword" + }, + "request": { + "norms": false, + "type": "text" + }, + "resource": { + "ignore_above": 1024, + "type": "keyword" + }, + "response": { + "norms": false, + "type": "text" + }, + "responsetime": { + "type": "long" + }, + "rpc": { + "properties": { + "auth_flavor": { + "ignore_above": 1024, + "type": "keyword" + }, + "call_size": { + "type": "long" + }, + "cred": { + "properties": { + "gid": { + "type": "long" + }, + "gids": { + "ignore_above": 1024, + "type": "keyword" + }, + "machinename": { + "ignore_above": 1024, + "type": "keyword" + }, + "stamp": { + "type": "long" + }, + "uid": { + "type": "long" + } + } + }, + "reply_size": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "time": { + "type": "long" + }, + "time_str": { + "ignore_above": 1024, + "type": "keyword" + }, + "xid": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "server": { + "ignore_above": 1024, + "type": "keyword" + }, + "service": { + "ignore_above": 1024, + "type": "keyword" + }, + "source": { + "properties": { + "ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip_location": { + "type": "geo_point" + }, + "ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + "ipv6_location": { + "type": "geo_point" + }, + "mac": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip_location": { + "type": "geo_point" + }, + "outer_ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ipv6_location": { + "type": "geo_point" + }, + "port": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "net_bytes_total": { + "type": "long" + }, + 
"net_packets_total": { + "type": "long" + } + } + } + } + }, + "start_time": { + "type": "date" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "thrift": { + "properties": { + "exceptions": { + "ignore_above": 1024, + "type": "keyword" + }, + "params": { + "ignore_above": 1024, + "type": "keyword" + }, + "return_value": { + "ignore_above": 1024, + "type": "keyword" + }, + "service": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "transport": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "vlan": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "packetbeat-*" +} diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json new file mode 100644 index 00000000000..017a3b0c326 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json @@ -0,0 +1,162 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": false + }, + "_meta": { + "version": "5.0.0-beta1" + }, + "dynamic_templates": [ + { + "fields": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "fields.*" + } + }, + { + "event_data": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "event_data.*" + } + }, + { + "user_data": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "user_data.*" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "activity_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "computer_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "event_id": { + "type": "long" + }, + "keywords": { + "ignore_above": 1024, + "type": "keyword" + }, + "level": { + "ignore_above": 1024, + "type": "keyword" + }, + "log_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "norms": false, + "type": "text" + }, + "message_error": { + "ignore_above": 1024, + "type": "keyword" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "process_id": { + "type": "long" + }, + "provider_guid": { + "ignore_above": 1024, + "type": "keyword" + }, + "record_number": { + "ignore_above": 1024, + "type": "keyword" + }, + "related_activity_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "source_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "task": { + "ignore_above": 1024, + "type": "keyword" + }, + "thread_id": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "user": { + "properties": { + "domain": { + "ignore_above": 1024, + "type": "keyword" + }, + "identifier": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "version": { + "type": "long" + }, + "xml": { + "norms": false, + "type": "text" + } + } 
+ } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "winlogbeat-*" +} diff --git a/dev-tools/prepare_release_candidate.py b/dev-tools/prepare_release_candidate.py deleted file mode 100644 index 84c10f4b9e4..00000000000 --- a/dev-tools/prepare_release_candidate.py +++ /dev/null @@ -1,417 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -# Prepare a release -# -# 1. Update the Version.java to remove the snapshot bit -# 2. Remove the -SNAPSHOT suffix in all pom.xml files -# -# USAGE: -# -# python3 ./dev-tools/prepare-release.py -# -# Note: Ensure the script is run from the elasticsearch top level directory -# - -import fnmatch -import argparse -from prepare_release_update_documentation import update_reference_docs -import subprocess -import tempfile -import re -import os -import shutil -from functools import partial -import sys - -VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java' -POM_FILE = 'pom.xml' -MAIL_TEMPLATE = """ -Hi all - -The new release candidate for %(version)s is now available, including the x-plugins and RPM/deb repos. 
This release is based on: - - * Elasticsearch commit: %(hash)s - https://github.com/elastic/elasticsearch/commit/%(hash)s - * X-Plugins commit: FILL_IN_X-PLUGINS_HASH - https://github.com/elastic/x-plugins/commit/FILL_IN_X-PLUGINS_HASH - -The packages may be downloaded from the following URLs: - - * ZIP - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip - * tar.gz - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz - * RPM - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm - * deb - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb - -Plugins can be installed as follows: - - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install cloud-aws - -The same goes for the x-plugins: - - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install license - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install marvel-agent - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install shield - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install watcher - -To install the deb from an APT repo: - -APT sources.list line: - -deb http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/debian/ stable main - -To install the RPM, create a YUM repo file like: - - /etc/yum.repos.d/elasticsearch.repo - -containing: - -[elasticsearch-2.0] -name=Elasticsearch repository for packages -baseurl=http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/centos -gpgcheck=1 -gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch -enabled=1 - -To smoke-test the release please run: - - python3 -B ./dev-tools/smoke_test_rc.py --version %(version)s --hash %(hash)s --plugins license,shield,watcher - -NOTE: this script requires JAVA_HOME to point to a Java 7 Runtime - -""" - -# console colors -COLOR_OK = '\033[92m' -COLOR_END = '\033[0m' -COLOR_FAIL = '\033[91m' - -def run(command, env_vars=None): - if env_vars: - for key, value in env_vars.items(): - os.putenv(key, value) - print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END)) - if os.system(command): - raise RuntimeError(' FAILED: %s' % (command)) - -def ensure_checkout_is_clean(): - # Make sure no local mods: - s = subprocess.check_output('git diff --shortstat', shell=True).decode('utf-8') - if len(s) > 0: - raise RuntimeError('git diff --shortstat is non-empty got:\n%s' % s) - - # Make sure no untracked files: - s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace') - if 'Untracked files:' in s: - if 'dev-tools/__pycache__/' in s: - print('*** NOTE: invoke python with -B to prevent __pycache__ directories ***') - raise RuntimeError('git status shows untracked files got:\n%s' % s) - - # Make sure we have all changes from origin: - if 'is behind' in s: - raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch got:\n%s' % (s)) - - # Make sure we have no local unpushed changes (this is supposed to be a clean area): - if 'is ahead' in s: - raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout <branch>", "git reset --hard origin/<branch>" in this branch got:\n%s' % (s)) - -# Reads the given file and applies the -# callback to it. If the callback changed -# a line, the given file is replaced with -# the modified input. -def process_file(file_path, line_callback): - fh, abs_path = tempfile.mkstemp() - modified = False - with open(abs_path,'w', encoding='utf-8') as new_file: - with open(file_path, encoding='utf-8') as old_file: - for line in old_file: - new_line = line_callback(line) - modified = modified or (new_line != line) - new_file.write(new_line) - os.close(fh) - if modified: - # Remove original file - os.remove(file_path) - # Move new file - shutil.move(abs_path, file_path) - return True - else: - # nothing to do - just remove the tmp file - os.remove(abs_path) - return False - -# Moves the Version.java file from a snapshot to a release -def remove_version_snapshot(version_file, release): - # 1.0.0.Beta1 -> 1_0_0_Beta1 - release = release.replace('.', '_') - release = release.replace('-', '_') - pattern = 'new Version(V_%s_ID, true' % (release) - replacement = 'new Version(V_%s_ID, false' % (release) - def callback(line): - return line.replace(pattern, replacement) - processed = process_file(version_file, callback) - if not processed: - raise RuntimeError('failed to remove snapshot version for %s' % (release)) - -def rename_local_meta_files(path): - for root, _, file_names in os.walk(path): - for file_name in fnmatch.filter(file_names, 'maven-metadata-local.xml*'): - full_path = os.path.join(root, file_name) - os.rename(full_path, os.path.join(root, file_name.replace('-local', ''))) - -# Checks the pom.xml for the release version. -# This method fails if the pom file has no SNAPSHOT version set, i.e. -# if the version is already a release version, we fail. -# Returns the release version string, e.g. 0.90.7 -def find_release_version(): - with open('pom.xml', encoding='utf-8') as file: - for line in file: - match = re.search(r'<version>(.+)-SNAPSHOT</version>', line) - if match: - return match.group(1) - raise RuntimeError('Could not find release version in branch') - -# Checks if the produced RPM is signed with the supplied GPG key -def ensure_rpm_is_signed(rpm, gpg_key): - rpm_check_signature_cmd = 'rpm -v -K %s | grep -qi %s' % (rpm, gpg_key) - try: - subprocess.check_output(rpm_check_signature_cmd, shell=True) - except subprocess.CalledProcessError: - raise RuntimeError('Aborting. RPM does not seem to be signed, check with: rpm -v -K %s' % rpm) - -# Checks if a command exists, needed for external binaries -def check_command_exists(name, cmd): - try: - subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - raise RuntimeError('Could not run command %s - please make sure it is installed and in $PATH' % (name)) - -def run_and_print(text, run_function): - try: - print(text, end='') - run_function() - print(COLOR_OK + 'OK' + COLOR_END) - return True - except RuntimeError: - print(COLOR_FAIL + 'NOT OK' + COLOR_END) - return False - -def check_env_var(text, env_var): - try: - print(text, end='') - os.environ[env_var] - print(COLOR_OK + 'OK' + COLOR_END) - return True - except KeyError: - print(COLOR_FAIL + 'NOT OK' + COLOR_END) - return False -
', 'AWS_ACCESS_KEY')) - checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version'))) - checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version'))) - checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version'))) - checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v'))) - checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version'))) - checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version'))) - checks.append(run_and_print('Checking command: deb-s3... ', partial(check_command_exists, 'deb-s3', 'deb-s3 -h'))) - checks.append(run_and_print('Checking command: rpm-s3... ', partial(check_command_exists, 'rpm-s3', 'rpm-s3 -h'))) - - if check_only: - sys.exit(0) - - if False in checks: - print("Exiting due to failing checks") - sys.exit(0) - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release') - parser.add_argument('--deploy-sonatype', dest='deploy_sonatype', action='store_true', - help='Installs and Deploys the release on a sonatype staging repository.') - parser.add_argument('--deploy-s3', dest='deploy_s3', action='store_true', - help='Pushes artifacts to the S3 staging area') - parser.add_argument('--deploy-s3-repos', dest='deploy_s3_repos', action='store_true', - help='Creates package repositories in S3 repo') - parser.add_argument('--no-install', dest='no_install', action='store_true', - help='Does not run "mvn install", expects this to be run already and reuses artifacts from local repo, only useful with --deploy-s3/--deploy-s3-repos, after sonatype deplomeny to ensure same artifacts') - parser.add_argument('--skip-doc-check', dest='skip_doc_check', action='store_false', - help='Skips any checks for pending documentation changes') - parser.add_argument('--skip-tests', dest='skip_tests', action='store_true', - help='Skips any test runs') - parser.add_argument('--gpg-key', dest='gpg_key', default="D88E42B4", - help='Allows you to specify a different gpg_key to be used instead of the default release key') - parser.add_argument('--bucket', '-b', dest='bucket', default="download.elasticsearch.org", - help='Allows you to specify a different s3 bucket to upload the artifacts to') - parser.add_argument('--quiet', dest='quiet', action='store_true', - help='Runs the script in quiet mode') - parser.add_argument('--check', dest='check', action='store_true', - help='Checks and reports for all requirements and then exits') - - # by default, we only run mvn install and don't push anything repo - parser.set_defaults(deploy_sonatype=False) - parser.set_defaults(deploy_s3=False) - parser.set_defaults(deploy_s3_repos=False) - parser.set_defaults(no_install=False) - # other defaults - parser.set_defaults(skip_doc_check=False) - parser.set_defaults(quiet=False) - parser.set_defaults(skip_tests=False) - - args = parser.parse_args() - skip_doc_check = args.skip_doc_check - gpg_key = args.gpg_key - bucket = args.bucket - deploy_sonatype = args.deploy_sonatype - deploy_s3 = args.deploy_s3 - deploy_s3_repos = args.deploy_s3_repos - run_mvn_install = not args.no_install - skip_tests = args.skip_tests - - check_environment_and_commandline_tools(args.check) - - if not run_mvn_install and deploy_sonatype: - 
print('Using --no-install and --deploy-sonatype together does not work. Exiting') - sys.exit(-1) - - print('*** Preparing a release candidate: ', end='') - print('deploy sonatype: %s%s%s' % (COLOR_OK if deploy_sonatype else COLOR_FAIL, 'yes' if deploy_sonatype else 'no', COLOR_END), end='') - print(', deploy s3: %s%s%s' % (COLOR_OK if deploy_s3 else COLOR_FAIL, 'yes' if deploy_s3 else 'no', COLOR_END), end='') - print(', deploy s3 repos: %s%s%s' % (COLOR_OK if deploy_s3_repos else COLOR_FAIL, 'yes' if deploy_s3_repos else 'no', COLOR_END), end='') - print('') - - shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8') - releaseDirectory = os.getenv('HOME') + '/elastic-releases' - release_version = find_release_version() - localRepo = '%s/elasticsearch-%s-%s' % (releaseDirectory, release_version, shortHash) - localRepoElasticsearch = localRepo + '/org/elasticsearch' - - ensure_checkout_is_clean() - if not re.match('(\d+\.\d+)\.*',release_version): - raise RuntimeError('illegal release version format: %s' % (release_version)) - package_repo_version = '%s.x' % re.match('(\d+)\.*', release_version).group(1) - - print('*** Preparing release version: [%s]' % release_version) - - if not skip_doc_check: - print('*** Check for pending documentation changes') - pending_files = update_reference_docs(release_version) - if pending_files: - raise RuntimeError('pending coming[%s] documentation changes found in %s' % (release_version, pending_files)) - - run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version)) - run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version)) - run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version)) - - remove_version_snapshot(VERSION_FILE, release_version) - - print('*** Done removing snapshot version. DO NOT COMMIT THIS, WHEN CREATING A RELEASE CANDIDATE.') - - if not os.path.exists(releaseDirectory): - os.mkdir(releaseDirectory) - if os.path.exists(localRepoElasticsearch) and run_mvn_install: - print('clean local repository %s' % localRepoElasticsearch) - shutil.rmtree(localRepoElasticsearch) - - mvn_target = 'deploy' if deploy_sonatype else 'install' - tests = '-DskipTests' if skip_tests else '-Dskip.integ.tests=true' - install_command = 'mvn clean %s -Prelease %s -Dgpg.key="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, tests, gpg_key, localRepo) - clean_repo_command = 'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch) - - if not run_mvn_install: - print('') - print('*** By choosing --no-install we assume you ran the following commands successfully:') - print(' %s' % (install_command)) - print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command)) - rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch) - print(' 2. 
Rename all maven metadata files: %s' % (rename_metadata_files_command)) - else: - for cmd in [install_command, clean_repo_command]: - run(cmd) - rename_local_meta_files(localRepoElasticsearch) - - rpm = '%s/distribution/rpm/elasticsearch/%s/elasticsearch-%s.rpm' % (localRepoElasticsearch, release_version, release_version) - print('Ensuring that RPM has been signed') - ensure_rpm_is_signed(rpm, gpg_key) - - # repository push commands - s3cmd_sync_to_staging_bucket_cmd = 's3cmd sync -P %s s3://%s/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, bucket, release_version, shortHash) - s3_bucket_sync_to = '%s/elasticsearch/staging/%s-%s/repos/' % (bucket, release_version, shortHash) - s3cmd_sync_official_repo_cmd = 's3cmd sync s3://packages.elasticsearch.org/elasticsearch/%s s3://%s' % (package_repo_version, s3_bucket_sync_to) - - debs3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/debian' % (release_version, shortHash, package_repo_version) - debs3_upload_cmd = 'deb-s3 upload --preserve-versions %s/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb -b %s --prefix %s --sign %s --arch amd64' % (localRepoElasticsearch, release_version, release_version, bucket, debs3_prefix, gpg_key) - debs3_list_cmd = 'deb-s3 list -b %s --prefix %s' % (bucket, debs3_prefix) - debs3_verify_cmd = 'deb-s3 verify -b %s --prefix %s' % (bucket, debs3_prefix) - rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/centos' % (release_version, shortHash, package_repo_version) - # external-1 is the alias name for the us-east-1 region. This is used by rpm-s3 to construct the hostname - rpms3_upload_cmd = 'rpm-s3 -v -b %s -p %s --sign --visibility public-read -k 100 %s -r external-1' % (bucket, rpms3_prefix, rpm) - - if deploy_s3: - run(s3cmd_sync_to_staging_bucket_cmd) - else: - print('') - print('*** To push a release candidate to s3 run: ') - print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch)) - print (' %s' % (s3cmd_sync_to_staging_bucket_cmd)) - - if deploy_s3_repos: - print('*** Syncing official package repository into staging s3 bucket') - run(s3cmd_sync_official_repo_cmd) - print('*** Uploading debian package (you will be prompted for the passphrase!)') - run(debs3_upload_cmd) - run(debs3_list_cmd) - run(debs3_verify_cmd) - print('*** Uploading rpm package (you will be prompted for the passphrase!)') - run(rpms3_upload_cmd) - else: - print('*** To create repositories on S3 run:') - print(' 1. Sync existing repo into staging: %s' % s3cmd_sync_official_repo_cmd) - print(' 2. Upload debian package (and sign it): %s' % debs3_upload_cmd) - print(' 3. List all debian packages: %s' % debs3_list_cmd) - print(' 4. Verify debian packages: %s' % debs3_verify_cmd) - print(' 5. 
Upload RPM: %s' % rpms3_upload_cmd) - print('') - print('NOTE: the above mvn command will promt you several times for the GPG passphrase of the key you specified you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase') - print(' since RPM signing doesn\'t support gpg-agents the recommended way to set the password is to add a release profile to your settings.xml:') - print(""" - - - release - - YourPasswordGoesHere - - - - """) - print('NOTE: Running s3cmd might require you to create a config file with your credentials, if the s3cmd does not support suppliying them via the command line!') - - print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:') - string_format_dict = {'version' : release_version, 'hash': shortHash, 'package_repo_version' : package_repo_version, 'bucket': bucket} - print(MAIL_TEMPLATE % string_format_dict) - - print('') - print('You can verify that pushing to the staging repository pushed all the artifacts by running (log into sonatype to find out the correct id):') - print(' python -B dev-tools/validate-maven-repository.py %s https://oss.sonatype.org/service/local/repositories/orgelasticsearch-IDTOFINDOUT/content/org/elasticsearch ' %(localRepoElasticsearch)) - - print('') - print('To publish the release and the repo on S3 execute the following commands:') - print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(package_repo_version)s' % string_format_dict) - print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/ s3://%(bucket)s/elasticsearch/release/org' % string_format_dict) - print('Now go ahead and tag the release:') - print(' git tag -a v%(version)s %(hash)s' % string_format_dict) - print(' git push origin v%(version)s' % string_format_dict ) - - diff --git a/dev-tools/validate-maven-repository.py b/dev-tools/validate-maven-repository.py deleted file mode 100644 index 6bf84a3a185..00000000000 --- a/dev-tools/validate-maven-repository.py +++ /dev/null @@ -1,130 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -# Helper python script to check if a sonatype staging repo contains -# all the required files compared to a local repository -# -# The script does the following steps -# -# 1. Scans the local maven repo for all files in /org/elasticsearch -# 2. Opens a HTTP connection to the staging repo -# 3. Executes a HEAD request for each file found in step one -# 4. Compares the content-length response header with the real file size -# 5. 
Return an error if those two numbers differ -# -# A pre requirement to run this, is to find out via the oss.sonatype.org web UI, how that repo is named -# - After logging in you go to 'Staging repositories' and search for the one you just created -# - Click into the `Content` tab -# - Open any artifact (not a directory) -# - Copy the link of `Repository Path` on the right and reuse that part of the URL -# -# Alternatively you can just use the name of the repository and reuse the rest (ie. the repository -# named for the example below would have been named orgelasticsearch-1012) -# -# -# Example call -# python dev-tools/validate-maven-repository.py /path/to/repo/org/elasticsearch/ \ -# https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch - -import sys -import os -import httplib -import urlparse -import re - -# Draw a simple progress bar, a couple of hundred HEAD requests might take a while -# Note, when drawing this, it uses the carriage return character, so you should not -# write anything in between -def drawProgressBar(percent, barLen = 40): - sys.stdout.write("\r") - progress = "" - for i in range(barLen): - if i < int(barLen * percent): - progress += "=" - else: - progress += " " - sys.stdout.write("[ %s ] %.2f%%" % (progress, percent * 100)) - sys.stdout.flush() - -if __name__ == "__main__": - if len(sys.argv) != 3: - print 'Usage: %s [user:pass]' % (sys.argv[0]) - print '' - print 'Example: %s /tmp/my-maven-repo/org/elasticsearch https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch' % (sys.argv[0]) - else: - sys.argv[1] = re.sub('/$', '', sys.argv[1]) - sys.argv[2] = re.sub('/$', '', sys.argv[2]) - - localMavenRepo = sys.argv[1] - endpoint = sys.argv[2] - - filesToCheck = [] - foundSignedFiles = False - - for root, dirs, files in os.walk(localMavenRepo): - for file in files: - # no metadata files (they get renamed from maven-metadata-local.xml to maven-metadata.xml while deploying) - # no .properties and .repositories files (they don't get uploaded) - if not file.startswith('maven-metadata') and not file.endswith('.properties') and not file.endswith('.repositories'): - filesToCheck.append(os.path.join(root, file)) - if file.endswith('.asc'): - foundSignedFiles = True - - print "Need to check %i files" % len(filesToCheck) - if not foundSignedFiles: - print '### Warning: No signed .asc files found' - - # set up http - parsed_uri = urlparse.urlparse(endpoint) - domain = parsed_uri.netloc - if parsed_uri.scheme == 'https': - conn = httplib.HTTPSConnection(domain) - else: - conn = httplib.HTTPConnection(domain) - #conn.set_debuglevel(5) - - drawProgressBar(0) - errors = [] - for idx, file in enumerate(filesToCheck): - request_uri = parsed_uri.path + file[len(localMavenRepo):] - conn.request("HEAD", request_uri) - res = conn.getresponse() - res.read() # useless call for head, but prevents httplib.ResponseNotReady raise - - absolute_url = parsed_uri.scheme + '://' + parsed_uri.netloc + request_uri - if res.status == 200: - content_length = res.getheader('content-length') - local_file_size = os.path.getsize(file) - if int(content_length) != int(local_file_size): - errors.append('LENGTH MISMATCH: %s differs in size. 
local %s <=> %s remote' % (absolute_url, content_length, local_file_size)) - elif res.status == 404: - errors.append('MISSING: %s' % absolute_url) - elif res.status == 301 or res.status == 302: - errors.append('REDIRECT: %s to %s' % (absolute_url, res.getheader('location'))) - else: - errors.append('ERROR: %s http response: %s %s' %(absolute_url, res.status, res.reason)) - - # update progressbar at the end - drawProgressBar((idx+1)/float(len(filesToCheck))) - - print - - if len(errors) != 0: - print 'The following errors occurred (%s out of %s files)' % (len(errors), len(filesToCheck)) - print - for error in errors: - print error - sys.exit(-1) diff --git a/docs/build.gradle b/docs/build.gradle index 69c53f847d0..19b7be6cead 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -178,7 +178,7 @@ integTest { configFile 'userdict_ja.txt' configFile 'KeywordTokenizer.rbbi' // Whitelist reindexing from the local node so we can test it. - setting 'reindex.remote.whitelist', 'myself' + setting 'reindex.remote.whitelist', '127.0.0.1:*' } } diff --git a/docs/java-api/search.asciidoc b/docs/java-api/search.asciidoc index 8fd47762ade..f5f20b30a9c 100644 --- a/docs/java-api/search.asciidoc +++ b/docs/java-api/search.asciidoc @@ -81,12 +81,12 @@ documentation [source,java] -------------------------------------------------- -SearchRequestBuilder srb1 = node.client() +SearchRequestBuilder srb1 = client .prepareSearch().setQuery(QueryBuilders.queryStringQuery("elasticsearch")).setSize(1); -SearchRequestBuilder srb2 = node.client() +SearchRequestBuilder srb2 = client .prepareSearch().setQuery(QueryBuilders.matchQuery("name", "kimchy")).setSize(1); -MultiSearchResponse sr = node.client().prepareMultiSearch() +MultiSearchResponse sr = client.prepareMultiSearch() .add(srb1) .add(srb2) .execute().actionGet(); @@ -107,7 +107,7 @@ The following code shows how to add two aggregations within your search: [source,java] -------------------------------------------------- -SearchResponse sr = node.client().prepareSearch() +SearchResponse sr = client.prepareSearch() .setQuery(QueryBuilders.matchAllQuery()) .addAggregation( AggregationBuilders.terms("agg1").field("field") diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index ba9899f9d68..3c3e9b8d13d 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -268,6 +268,4 @@ params:: Optional. An object whose contents will be passed as variable "_agg" : {} } -------------------------------------------------- -reduce_params:: Optional. An object whose contents will be passed as variables to the `reduce_script`. This can be useful to allow the user to control - the behavior of the reduce phase. If this is not specified the variable will be undefined in the reduce_script execution. diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index e5a1be88123..282b5528aa4 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -219,6 +219,7 @@ Some examples are: `2015-01-01||+1M/d`:: `2015-01-01` plus one month, rounded down to the nearest day. 
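As a quick aid for the date-math rounding rules above, here is a minimal java.time sketch (an editorial illustration using only the standard java.time API, not part of the documentation change; the class name is hypothetical) of what `2015-01-01||+1M/d` evaluates to:

[source,java]
--------------------------------------------------
import java.time.LocalDate;
import java.time.LocalDateTime;

public class DateMathSketch {
    public static void main(String[] args) {
        // "2015-01-01||" is the anchor date, "+1M" adds one month,
        // and "/d" rounds the result down to the nearest day.
        LocalDate anchor = LocalDate.parse("2015-01-01");
        LocalDateTime result = anchor.plusMonths(1).atStartOfDay();
        System.out.println(result); // prints 2015-02-01T00:00
    }
}
--------------------------------------------------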
 [float]
+[[common-options-response-filtering]]
 === Response Filtering

 All REST APIs accept a `filter_path` parameter that can be used to reduce
diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc
index 3c5b0bbfc92..7d1d26d44c5 100644
--- a/docs/reference/docs/reindex.asciidoc
+++ b/docs/reference/docs/reindex.asciidoc
@@ -410,8 +410,8 @@ basic auth or the password will be sent in plain text.

 Remote hosts have to be explicitly whitelisted in elasticsearch.yml using the
 `reindex.remote.whitelist` property. It can be set to a comma delimited list
 of allowed remote `host` and `port` combinations (e.g.
-`otherhost:9200, another:9200`). Scheme is ignored by the whitelist - only host
-and port are used.
+`otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*`). Scheme is
+ignored by the whitelist - only host and port are used.

 This feature should work with remote clusters of any version of Elasticsearch
 you are likely to find. This should allow you to upgrade from any version of
diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc
index 423968bb071..2ac33a1cdf5 100644
--- a/docs/reference/index-modules/slowlog.asciidoc
+++ b/docs/reference/index-modules/slowlog.asciidoc
@@ -24,7 +24,7 @@ index.search.slowlog.threshold.fetch.debug: 500ms
 index.search.slowlog.threshold.fetch.trace: 200ms
 --------------------------------------------------

-All of the above settings are _dynamic_ and can be set per-index.
+All of the above settings are _dynamic_ and are set per-index.

 By default, none are enabled (set to `-1`). Levels (`warn`, `info`,
 `debug`, `trace`) allow you to control under which logging level the log
@@ -42,7 +42,7 @@ level.

 The logging file is configured by default using the following
 configuration (found in `log4j2.properties`):

-[source,yaml]
+[source,properties]
 --------------------------------------------------
 appender.index_search_slowlog_rolling.type = RollingFile
 appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
@@ -67,8 +67,8 @@ logger.index_search_slowlog_rolling.additivity = false

 The indexing slow log, similar in functionality to the search slow
 log. The log file name ends with `_index_indexing_slowlog.log`. Log and
-the thresholds are configured in the elasticsearch.yml file in the same
-way as the search slowlog. Index slowlog sample:
+the thresholds are configured in the same way as the search slowlog.
+Index slowlog sample:

 [source,yaml]
 --------------------------------------------------
@@ -80,23 +80,31 @@ index.indexing.slowlog.level: info
 index.indexing.slowlog.source: 1000
 --------------------------------------------------

-All of the above settings are _dynamic_ and can be set per-index.
+All of the above settings are _dynamic_ and are set per-index.

 By default Elasticsearch will log the first 1000 characters of the _source in
 the slowlog. You can change that with `index.indexing.slowlog.source`. Setting
 it to `false` or `0` will skip logging the source entirely and setting it to
 `true` will log the entire source regardless of size.
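As a minimal sketch of the `index.indexing.slowlog.source` semantics described above (an editorial illustration built around a hypothetical helper, not the actual Elasticsearch implementation):

[source,java]
--------------------------------------------------
public class SlowlogSourceSketch {
    // Returns the portion of _source to include in a slowlog line, given a
    // setting value of "true", "false", or a character count such as "1000".
    static String sourceToLog(String source, String setting) {
        if ("false".equals(setting) || "0".equals(setting)) {
            return "";      // skip logging the source entirely
        }
        if ("true".equals(setting)) {
            return source;  // log the whole source, regardless of size
        }
        int maxChars = Integer.parseInt(setting); // e.g. the default of 1000
        return source.substring(0, Math.min(maxChars, source.length()));
    }

    public static void main(String[] args) {
        // Prints only the first ten characters of the document source.
        System.out.println(sourceToLog("{\"field\":\"a long document...\"}", "10"));
    }
}
--------------------------------------------------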
-The index slow log file is configured by default in the `logging.yml`
+The index slow log file is configured by default in the `log4j2.properties`
 file:

-[source,yaml]
+[source,properties]
 --------------------------------------------------
-index_indexing_slow_log_file:
-  type: dailyRollingFile
-  file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
-  datePattern: "'.'yyyy-MM-dd"
-  layout:
-    type: pattern
-    conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
 --------------------------------------------------
diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc
index 0c06ce43d3c..04e79ca5a08 100644
--- a/docs/reference/indices/rollover-index.asciidoc
+++ b/docs/reference/indices/rollover-index.asciidoc
@@ -34,7 +34,7 @@ POST /logs_write/_rollover <2>
 // TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/]
 <1> Creates an index called `logs-000001` with the alias `logs_write`.
 <2> If the index pointed to by `logs_write` was created 7 or more days ago, or
-    contains 1,000 or more documents, then the `logs-0002` index is created
+    contains 1,000 or more documents, then the `logs-000002` index is created
     and the `logs_write` alias is updated to point to `logs-000002`.

 The above request might return the following response:
diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc
index 39cbdbf0178..95cd32578a0 100644
--- a/docs/reference/indices/templates.asciidoc
+++ b/docs/reference/indices/templates.asciidoc
@@ -181,3 +181,50 @@ for indices that start with `te*`, source will still be enabled.
 Note, for mappings, the merging is "deep", meaning that specific
 object/property based mappings can easily be added/overridden on higher
 order templates, with lower order templates providing the basis.
+
+[float]
+[[versioning-templates]]
+=== Template Versioning
+
+Templates can optionally add a `version` number, which can be any integer value,
+in order to simplify template management by external systems. The `version`
+field is completely optional and it is meant solely for external management of
+templates. To unset a `version`, simply replace the template without specifying
+one.
+
+[source,js]
+--------------------------------------------------
+PUT /_template/template_1
+{
+  "template" : "*",
+  "order" : 0,
+  "settings" : {
+    "number_of_shards" : 1
+  },
+  "version": 123
+}
+--------------------------------------------------
+// CONSOLE
+
+To check for the `version`, you can
+<<common-options-response-filtering,filter responses>>
+using `filter_path` to limit the response to just the `version`:
+
+[source,js]
+--------------------------------------------------
+GET /_template/template_1?filter_path=*.version
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+This should give a small response that makes it both easy and inexpensive to parse:
+
+[source,js]
+--------------------------------------------------
+{
+  "template_1" : {
+    "version" : 123
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index 013275c0466..9f11caed1ac 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -90,6 +90,56 @@ For each returned pipeline, the source and the version are returned.
 The version is useful for knowing which version of the pipeline the node has.
 You can specify multiple IDs to return more than one pipeline. Wildcards are also supported.

+[float]
+[[versioning-pipelines]]
+==== Pipeline Versioning
+
+Pipelines can optionally add a `version` number, which can be any integer value,
+in order to simplify pipeline management by external systems. The `version`
+field is completely optional and it is meant solely for external management of
+pipelines. To unset a `version`, simply replace the pipeline without specifying
+one.
+
+[source,js]
+--------------------------------------------------
+PUT _ingest/pipeline/my-pipeline-id
+{
+  "description" : "describe pipeline",
+  "version" : 123,
+  "processors" : [
+    {
+      "set" : {
+        "field": "foo",
+        "value": "bar"
+      }
+    }
+  ]
+}
+--------------------------------------------------
+// CONSOLE
+
+To check for the `version`, you can
+<<common-options-response-filtering,filter responses>>
+using `filter_path` to limit the response to just the `version`:
+
+[source,js]
+--------------------------------------------------
+GET /_ingest/pipeline/my-pipeline-id?filter_path=*.version
+--------------------------------------------------
+// TEST[continued]
+
+This should give a small response that makes it both easy and inexpensive to parse:
+
+[source,js]
+--------------------------------------------------
+{
+  "my-pipeline-id" : {
+    "version" : 123
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
+
 [[delete-pipeline-api]]
 === Delete Pipeline API
@@ -1382,14 +1432,16 @@ caching see <>.
 .Script Options
 [options="header"]
|======
-| Name     | Required  | Default     | Description
-| `lang`   | no        | -           | The scripting language
-| `file`   | no        | -           | The script file to refer to
-| `id`     | no        | -           | The stored script id to refer to
-| `inline` | no        | -           | An inline script to be executed
-| `params` | no        | -           | Script Parameters
+| Name     | Required  | Default     | Description
+| `lang`   | no        | "painless"  | The scripting language
+| `file`   | no        | -           | The script file to refer to
+| `id`     | no        | -           | The stored script id to refer to
+| `inline` | no        | -           | An inline script to be executed
+| `params` | no        | -           | Script Parameters
 |======
+
+One of the `file`, `id`, or `inline` options must be provided to reference a script to execute.
+
 You can access the current ingest document from within the script context by
 using the `ctx` variable.
 The following example sets a new field called `field_a_plus_b_times_c` to be the sum of two existing
@@ -1639,4 +1691,4 @@ pipeline should be used:
 --------------------------------------------------

 The reason for this is that Ingest doesn't know how to automatically cast
-a scalar field to an object field.
\ No newline at end of file
+a scalar field to an object field.
diff --git a/docs/reference/modules/cluster/allocation_filtering.asciidoc b/docs/reference/modules/cluster/allocation_filtering.asciidoc
index 437f243c018..c173e4eb385 100644
--- a/docs/reference/modules/cluster/allocation_filtering.asciidoc
+++ b/docs/reference/modules/cluster/allocation_filtering.asciidoc
@@ -6,6 +6,31 @@ allocation of shards to nodes, cluster-level shard allocation filtering allows
 you to allow or disallow the allocation of shards from *any* index to
 particular nodes.

+The available _dynamic_ cluster settings are as follows, where `{attribute}`
+refers to an arbitrary node attribute:
+
+`cluster.routing.allocation.include.{attribute}`::
+
+    Allocate shards to a node whose `{attribute}` has at least one of the
+    comma-separated values.
+
+`cluster.routing.allocation.require.{attribute}`::
+
+    Only allocate shards to a node whose `{attribute}` has _all_ of the
+    comma-separated values.
+
+`cluster.routing.allocation.exclude.{attribute}`::
+
+    Do not allocate shards to a node whose `{attribute}` has _any_ of the
+    comma-separated values.
+
+These special attributes are also supported:
+
+[horizontal]
+`_name`:: Match nodes by node names
+`_ip`:: Match nodes by IP addresses (the IP address associated with the hostname)
+`_host`:: Match nodes by hostnames
+
 The typical use case for cluster-wide shard allocation filtering is when you
 want to decommission a node, and you would like to move the shards from that
 node to other nodes in the cluster before shutting it down.
@@ -27,35 +52,8 @@ NOTE: Shards will only be relocated if it is possible to do so without breaking
 another routing constraint, such as never allocating a primary and replica
 shard to the same node.

-Cluster-wide shard allocation filtering works in the same way as index-level
-shard allocation filtering (see <<shard-allocation-filtering>> for details).
-
-The available _dynamic_ cluster settings are as follows, where `{attribute}`
-refers to an arbitrary node attribute.:
-
-`cluster.routing.allocation.include.{attribute}`::
-
-    Assign the index to a node whose `{attribute}` has at least one of the
-    comma-separated values.
-
-`cluster.routing.allocation.require.{attribute}`::
-
-    Assign the index to a node whose `{attribute}` has _all_ of the
-    comma-separated values.
-
-`cluster.routing.allocation.exclude.{attribute}`::
-
-    Assign the index to a node whose `{attribute}` has _none_ of the
-    comma-separated values.
- -These special attributes are also supported: - -[horizontal] -`_name`:: Match nodes by node name -`_ip`:: Match nodes by IP address (the IP address associated with the hostname) -`_host`:: Match nodes by hostname - -All attribute values can be specified with wildcards, eg: +In addition to listing multiple values as a comma-separated list, all +attribute values can be specified with wildcards, eg: [source,js] ------------------------ diff --git a/docs/reference/query-dsl/regexp-syntax.asciidoc b/docs/reference/query-dsl/regexp-syntax.asciidoc index 68ca5912458..6a929ba98d5 100644 --- a/docs/reference/query-dsl/regexp-syntax.asciidoc +++ b/docs/reference/query-dsl/regexp-syntax.asciidoc @@ -203,7 +203,7 @@ For string `"abcd"`: ===== Optional operators These operators are available by default as the `flags` parameter defaults to `ALL`. -Different flag combinations (concatened with `"\"`) can be used to enable/disable +Different flag combinations (concatenated with `"|"`) can be used to enable/disable specific operators: { diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 2e7bc9f1805..73ade7a47d6 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -15,14 +15,14 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {} + "comment" : {} } } } -------------------------------------------------- // CONSOLE -In the above case, the `content` field will be highlighted for each +In the above case, the `comment` field will be highlighted for each search hit (there will be another element in each search hit, called `highlight`, which includes the highlighted fields and the highlighted fragments). @@ -71,14 +71,14 @@ natural languages, not as well with fields containing for instance html markup * Treats the document as the whole corpus, and scores individual sentences as if they were documents in this corpus, using the BM25 algorithm -Here is an example of setting the `content` field in the index mapping to allow for +Here is an example of setting the `comment` field in the index mapping to allow for highlighting using the postings highlighter on it: [source,js] -------------------------------------------------- { "type_name" : { - "content" : {"index_options" : "offsets"} + "comment" : {"index_options" : "offsets"} } } -------------------------------------------------- @@ -113,7 +113,7 @@ will be used instead of the plain highlighter. 
The fast vector highlighter: for things like phrase matches being sorted above term matches when highlighting a Boosting Query that boosts phrase matches over term matches -Here is an example of setting the `content` field to allow for +Here is an example of setting the `comment` field to allow for highlighting using the fast vector highlighter on it (this will cause the index to be bigger): @@ -121,7 +121,7 @@ the index to be bigger): -------------------------------------------------- { "type_name" : { - "content" : {"term_vector" : "with_positions_offsets"} + "comment" : {"term_vector" : "with_positions_offsets"} } } -------------------------------------------------- @@ -142,7 +142,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {"type" : "plain"} + "comment" : {"type" : "plain"} } } } @@ -163,7 +163,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {"force_source" : true} + "comment" : {"force_source" : true} } } } @@ -241,7 +241,7 @@ GET /_search "highlight" : { "tags_schema" : "styled", "fields" : { - "content" : {} + "comment" : {} } } } @@ -271,7 +271,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {"fragment_size" : 150, "number_of_fragments" : 3} + "comment" : {"fragment_size" : 150, "number_of_fragments" : 3} } } } @@ -294,7 +294,7 @@ GET /_search "highlight" : { "order" : "score", "fields" : { - "content" : {"fragment_size" : 150, "number_of_fragments" : 3} + "comment" : {"fragment_size" : 150, "number_of_fragments" : 3} } } } @@ -317,7 +317,7 @@ GET /_search "highlight" : { "fields" : { "_all" : {}, - "bio.title" : {"number_of_fragments" : 0} + "blog.title" : {"number_of_fragments" : 0} } } } @@ -345,7 +345,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : { + "comment" : { "fragment_size" : 150, "number_of_fragments" : 3, "no_match_size": 150 @@ -375,7 +375,7 @@ GET /_search "stored_fields": [ "_id" ], "query" : { "match": { - "content": { + "comment": { "query": "foo bar" } } @@ -385,7 +385,7 @@ GET /_search "query": { "rescore_query" : { "match_phrase": { - "content": { + "comment": { "query": "foo bar", "slop": 1 } @@ -397,21 +397,21 @@ GET /_search "highlight" : { "order" : "score", "fields" : { - "content" : { + "comment" : { "fragment_size" : 150, "number_of_fragments" : 3, "highlight_query": { "bool": { "must": { "match": { - "content": { + "comment": { "query": "foo bar" } } }, "should": { "match_phrase": { - "content": { + "comment": { "query": "foo bar", "slop": 1, "boost": 10.0 @@ -452,9 +452,9 @@ GET /_search "fragment_size" : 150, "fields" : { "_all" : { "pre_tags" : [""], "post_tags" : [""] }, - "bio.title" : { "number_of_fragments" : 0 }, - "bio.author" : { "number_of_fragments" : 0 }, - "bio.content" : { "number_of_fragments" : 5, "order" : "score" } + "blog.title" : { "number_of_fragments" : 0 }, + "blog.author" : { "number_of_fragments" : 0 }, + "blog.comment" : { "number_of_fragments" : 5, "order" : "score" } } } } @@ -508,8 +508,8 @@ ways. All `matched_fields` must have `term_vector` set to combined is loaded so only that field would benefit from having `store` set to `yes`. -In the following examples `content` is analyzed by the `english` -analyzer and `content.plain` is analyzed by the `standard` analyzer. +In the following examples `comment` is analyzed by the `english` +analyzer and `comment.plain` is analyzed by the `standard` analyzer. 
[source,js] -------------------------------------------------- @@ -517,15 +517,15 @@ GET /_search { "query": { "query_string": { - "query": "content.plain:running scissors", - "fields": ["content"] + "query": "comment.plain:running scissors", + "fields": ["comment"] } }, "highlight": { "order": "score", "fields": { - "content": { - "matched_fields": ["content", "content.plain"], + "comment": { + "matched_fields": ["comment", "comment.plain"], "type" : "fvh" } } @@ -546,14 +546,14 @@ GET /_search "query": { "query_string": { "query": "running scissors", - "fields": ["content", "content.plain^10"] + "fields": ["comment", "comment.plain^10"] } }, "highlight": { "order": "score", "fields": { - "content": { - "matched_fields": ["content", "content.plain"], + "comment": { + "matched_fields": ["comment", "comment.plain"], "type" : "fvh" } } @@ -572,14 +572,14 @@ GET /_search "query": { "query_string": { "query": "running scissors", - "fields": ["content", "content.plain^10"] + "fields": ["comment", "comment.plain^10"] } }, "highlight": { "order": "score", "fields": { - "content": { - "matched_fields": ["content.plain"], + "comment": { + "matched_fields": ["comment.plain"], "type" : "fvh" } } @@ -590,7 +590,7 @@ GET /_search The above query wouldn't highlight "run" or "scissor" but shows that it is just fine not to list the field to which the matches are combined -(`content`) in the matched fields. +(`comment`) in the matched fields. [NOTE] Technically it is also fine to add fields to `matched_fields` that @@ -606,7 +606,7 @@ There is a small amount of overhead involved with setting -------------------------------------------------- "highlight": { "fields": { - "content": {} + "comment": {} } } -------------------------------------------------- @@ -615,8 +615,8 @@ to -------------------------------------------------- "highlight": { "fields": { - "content": { - "matched_fields": ["content"], + "comment": { + "matched_fields": ["comment"], "type" : "fvh" } } diff --git a/modules/build.gradle b/modules/build.gradle index d5b207625c1..b3dbde24936 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -25,6 +25,11 @@ subprojects { // for local ES plugins, the name of the plugin is the same as the directory name project.name } + + run { + // these cannot be run with the normal distribution, since they are included in it! 
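+    // i.e. run against integ-test-zip, which (unlike the zip distribution)
+    // is not expected to bundle these modules, so the module being developed
+    // can be installed into it without clashing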
+ distribution = 'integ-test-zip' + } if (project.file('src/main/packaging').exists()) { throw new InvalidModelException("Modules cannot contain packaging files") diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index 94f335cc12f..7129d2e3343 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -69,6 +69,10 @@ public final class ScriptProcessor extends AbstractProcessor { return TYPE; } + Script getScript() { + return script; + } + public static final class Factory implements Processor.Factory { private final ScriptService scriptService; @@ -80,7 +84,7 @@ public final class ScriptProcessor extends AbstractProcessor { @Override public ScriptProcessor create(Map registry, String processorTag, Map config) throws Exception { - String lang = readStringProperty(TYPE, processorTag, config, "lang"); + String lang = readOptionalStringProperty(TYPE, processorTag, config, "lang"); String inline = readOptionalStringProperty(TYPE, processorTag, config, "inline"); String file = readOptionalStringProperty(TYPE, processorTag, config, "file"); String id = readOptionalStringProperty(TYPE, processorTag, config, "id"); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java index 27eeb80670a..938b5be7f76 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -29,18 +29,48 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; public class ScriptProcessorFactoryTests extends ESTestCase { private ScriptProcessor.Factory factory; + private static final Map ingestScriptParamToType; + static { + Map map = new HashMap<>(); + map.put("id", "stored"); + map.put("inline", "inline"); + map.put("file", "file"); + ingestScriptParamToType = Collections.unmodifiableMap(map); + } @Before public void init() { factory = new ScriptProcessor.Factory(mock(ScriptService.class)); } + public void testFactoryValidationWithDefaultLang() throws Exception { + Map configMap = new HashMap<>(); + String randomType = randomFrom("id", "inline", "file"); + configMap.put(randomType, "foo"); + ScriptProcessor processor = factory.create(null, randomAsciiOfLength(10), configMap); + assertThat(processor.getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); + assertThat(processor.getScript().getType().toString(), equalTo(ingestScriptParamToType.get(randomType))); + assertThat(processor.getScript().getParams(), equalTo(Collections.emptyMap())); + } + + public void testFactoryValidationWithParams() throws Exception { + Map configMap = new HashMap<>(); + String 
randomType = randomFrom("id", "inline", "file"); + Map randomParams = Collections.singletonMap(randomAsciiOfLength(10), randomAsciiOfLength(10)); + configMap.put(randomType, "foo"); + configMap.put("params", randomParams); + ScriptProcessor processor = factory.create(null, randomAsciiOfLength(10), configMap); + assertThat(processor.getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); + assertThat(processor.getScript().getType().toString(), equalTo(ingestScriptParamToType.get(randomType))); + assertThat(processor.getScript().getParams(), equalTo(randomParams)); + } public void testFactoryValidationForMultipleScriptingTypes() throws Exception { Map configMap = new HashMap<>(); diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt index 3757e4fb76c..19af8204f28 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt @@ -101,6 +101,12 @@ class org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints -> org.elastic double geohashDistanceWithDefault(String,double) } +class org.elasticsearch.index.fielddata.ScriptDocValues.Booleans -> org.elasticsearch.index.fielddata.ScriptDocValues$Booleans extends List,Collection,Iterable,Object { + Boolean get(int) + boolean getValue() + List getValues() +} + # for testing. # currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.FeatureTest extends Object { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml index f1051ba7106..d92c0e41e6c 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml @@ -6,19 +6,19 @@ index: test type: test id: 1 - body: { "test": "value beck", "num1": 1.0 } + body: { "test": "value beck", "num1": 1.0, "bool": true } - do: index: index: test type: test id: 2 - body: { "test": "value beck", "num1": 2.0 } + body: { "test": "value beck", "num1": 2.0, "bool": false } - do: index: index: test type: test id: 3 - body: { "test": "value beck", "num1": 3.0 } + body: { "test": "value beck", "num1": 3.0, "bool": true } - do: indices.refresh: {} @@ -95,6 +95,19 @@ - match: { hits.hits.1.fields.sNum1.0: 2.0 } - match: { hits.hits.2.fields.sNum1.0: 3.0 } + - do: + index: test + search: + body: + query: + script: + script: + inline: "doc['bool'].value == false" + lang: painless + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "2" } + --- "Custom Script Boost": diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index fa4c44d9224..be8fb7defc0 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -26,13 +26,13 @@ esplugin { integTest { cluster { // Whitelist reindexing from the local node so we can test it. - setting 'reindex.remote.whitelist', 'myself' + setting 'reindex.remote.whitelist', '127.0.0.1:*' } } run { // Whitelist reindexing from the local node so we can test it. 
- setting 'reindex.remote.whitelist', 'myself' + setting 'reindex.remote.whitelist', '127.0.0.1:*' } dependencies { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 33aca028351..d3f76a17866 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -28,6 +28,11 @@ import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.reactor.IOReactorConfig; import org.apache.http.message.BasicHeader; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.MinimizationOperations; +import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.bulk.BackoffPolicy; @@ -44,8 +49,10 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -65,11 +72,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Function; @@ -87,7 +92,7 @@ public class TransportReindexAction extends HandledTransportAction remoteWhitelist; + private final CharacterRunAutomaton remoteWhitelist; private final HttpServer httpServer; @Inject @@ -100,7 +105,7 @@ public class TransportReindexAction extends HandledTransportAction(REMOTE_CLUSTER_WHITELIST.get(settings)); + remoteWhitelist = buildRemoteWhitelist(REMOTE_CLUSTER_WHITELIST.get(settings)); this.httpServer = httpServer; } @@ -128,22 +133,35 @@ public class TransportReindexAction extends HandledTransportAction whitelist, RemoteInfo remoteInfo, TransportAddress publishAddress) { - if (remoteInfo == null) return; + static void checkRemoteWhitelist(CharacterRunAutomaton whitelist, RemoteInfo remoteInfo, TransportAddress publishAddress) { + if (remoteInfo == null) { + return; + } String check = remoteInfo.getHost() + ':' + remoteInfo.getPort(); - if (whitelist.contains(check)) return; - /* - * For testing we support the key "myself" to allow connecting to the local node. We can't just change the setting to include the - * local node because it is intentionally not a dynamic setting for security purposes. We can't use something like "localhost:9200" - * because we don't know up front which port we'll get because the tests bind to port 0. Instead we try to resolve it here, taking - * "myself" to mean "my published http address". 
- */ - if (whitelist.contains("myself") && publishAddress != null && publishAddress.toString().equals(check)) { + if (whitelist.run(check)) { return; } throw new IllegalArgumentException('[' + check + "] not whitelisted in " + REMOTE_CLUSTER_WHITELIST.getKey()); } + /** + * Build the {@link CharacterRunAutomaton} that represents the reindex-from-remote whitelist and make sure that it doesn't whitelist + * the world. + */ + static CharacterRunAutomaton buildRemoteWhitelist(List<String> whitelist) { + if (whitelist.isEmpty()) { + return new CharacterRunAutomaton(Automata.makeEmpty()); + } + Automaton automaton = Regex.simpleMatchToAutomaton(whitelist.toArray(Strings.EMPTY_ARRAY)); + automaton = MinimizationOperations.minimize(automaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES); + if (Operations.isTotal(automaton)) { + throw new IllegalArgumentException("Refusing to start because whitelist " + whitelist + " accepts all addresses. " + + "This would allow users to reindex-from-remote any URL they like, effectively having Elasticsearch make HTTP GETs " + + "for them."); + } + return new CharacterRunAutomaton(automaton); + } + /** * Throws an ActionRequestValidationException if the request tries to index * back into the same index or into an index that points to two indexes.
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java index 2ea8fa6514d..40d929f13cc 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java @@ -27,11 +27,14 @@ import org.junit.Before; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.HashSet; -import java.util.Set; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; +import static java.util.Collections.singletonList; +import static org.elasticsearch.index.reindex.TransportReindexAction.buildRemoteWhitelist; import static org.elasticsearch.index.reindex.TransportReindexAction.checkRemoteWhitelist; /** @@ -46,45 +49,89 @@ public class ReindexFromRemoteWhitelistTests extends ESTestCase { } public void testLocalRequestWithoutWhitelist() { - checkRemoteWhitelist(emptySet(), null, localhostOrNone()); + checkRemoteWhitelist(buildRemoteWhitelist(emptyList()), null, localhostOrNone()); } public void testLocalRequestWithWhitelist() { - checkRemoteWhitelist(randomWhitelist(), null, localhostOrNone()); + checkRemoteWhitelist(buildRemoteWhitelist(randomWhitelist()), null, localhostOrNone()); } public void testWhitelistedRemote() { - Set<String> whitelist = randomWhitelist(); + List<String> whitelist = randomWhitelist(); String[] inList = whitelist.iterator().next().split(":"); String host = inList[0]; int port = Integer.valueOf(inList[1]); - checkRemoteWhitelist(whitelist, new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null, emptyMap()), + checkRemoteWhitelist(buildRemoteWhitelist(whitelist), + new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null, emptyMap()), localhostOrNone()); } - public void testMyselfInWhitelistRemote() throws UnknownHostException { - Set<String> whitelist = randomWhitelist(); - whitelist.add("myself"); + public void testWhitelistedByPrefix() { + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")), + new RemoteInfo(randomAsciiOfLength(5), "es.example.com", 9200, new BytesArray("test"), null, null, emptyMap()), + localhostOrNone()); + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")), + new RemoteInfo(randomAsciiOfLength(5), "6e134134a1.us-east-1.aws.example.com", 9200, + new BytesArray("test"), null, null, emptyMap()), + localhostOrNone()); + } + + public void testWhitelistedBySuffix() { + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("es.example.com:*")), + new RemoteInfo(randomAsciiOfLength(5), "es.example.com", 9200, new BytesArray("test"), null, null, emptyMap()), + localhostOrNone()); + } + + public void testWhitelistedByInfix() { + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("es*.example.com:9200")), + new RemoteInfo(randomAsciiOfLength(5), "es1.example.com", 9200, new BytesArray("test"), null, null, emptyMap()), + localhostOrNone()); + } + + + public void testLoopbackInWhitelistRemote() throws UnknownHostException { + List<String> whitelist = randomWhitelist(); + whitelist.add("127.0.0.1:*"); TransportAddress publishAddress = new TransportAddress(InetAddress.getByAddress(new byte[] {0x7f,0x00,0x00,0x01}), 9200); - checkRemoteWhitelist(whitelist, - new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null, emptyMap()), publishAddress); + checkRemoteWhitelist(buildRemoteWhitelist(whitelist), + new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null, emptyMap()), + publishAddress); } public void testUnwhitelistedRemote() { int port = between(1, Integer.MAX_VALUE); RemoteInfo remoteInfo = new RemoteInfo(randomAsciiOfLength(5), "not in list", port, new BytesArray("test"), null, null, emptyMap()); + List<String> whitelist = randomBoolean() ? randomWhitelist() : emptyList(); Exception e = expectThrows(IllegalArgumentException.class, - () -> checkRemoteWhitelist(randomWhitelist(), remoteInfo, localhostOrNone())); + () -> checkRemoteWhitelist(buildRemoteWhitelist(whitelist), remoteInfo, localhostOrNone())); assertEquals("[not in list:" + port + "] not whitelisted in reindex.remote.whitelist", e.getMessage()); } - private Set<String> randomWhitelist() { + public void testRejectMatchAll() { + assertMatchesTooMuch(singletonList("*")); + assertMatchesTooMuch(singletonList("**")); + assertMatchesTooMuch(singletonList("***")); + assertMatchesTooMuch(Arrays.asList("realstuff", "*")); + assertMatchesTooMuch(Arrays.asList("*", "realstuff")); + List<String> random = randomWhitelist(); + random.add("*"); + assertMatchesTooMuch(random); + } + + private void assertMatchesTooMuch(List<String> whitelist) { + Exception e = expectThrows(IllegalArgumentException.class, () -> buildRemoteWhitelist(whitelist)); + assertEquals("Refusing to start because whitelist " + whitelist + " accepts all addresses. " + + "This would allow users to reindex-from-remote any URL they like, effectively having Elasticsearch make HTTP GETs " + + "for them.", e.getMessage()); + } + + private List<String> randomWhitelist() { int size = between(1, 100); - Set<String> set = new HashSet<>(size); - while (set.size() < size) { - set.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE)); + List<String> whitelist = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + whitelist.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE)); } - return set; + return whitelist; } private TransportAddress localhostOrNone() {
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index e3401e4cf39..b4ac273b43b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -72,7 +72,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { // Weird incantation required to test with netty settings.put(NetworkModule.HTTP_ENABLED.getKey(), true); // Whitelist reindexing from the http host we're going to use - settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself"); + settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*"); settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); return settings.build(); }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index bd43a32945e..ccd3e5a873e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -109,7 +109,7 @@ public class RetryTests extends ESSingleNodeTestCase { // Enable http so we can test retries on reindex from remote. In this case the "remote" cluster is just this cluster.
settings.put(NetworkModule.HTTP_ENABLED.getKey(), true); // Whitelist reindexing from the http host we're going to use - settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself"); + settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*"); if (useNetty3) { settings.put(NetworkModule.HTTP_TYPE_KEY, Netty3Plugin.NETTY_HTTP_TRANSPORT_NAME); settings.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 014014b432c..d1050e80adc 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.azure.AzureRepository.Repository; @@ -75,14 +74,6 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyIntegT return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - // In snapshot tests, we explicitly disable cloud discovery - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") - .build(); - } - @Override public Settings indexSettings() { // During restore we frequently restore index to exactly the same state it was before, that might cause the same diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 7ca0aa57d7a..fd54c5fadbe 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -26,22 +26,23 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; import java.nio.file.Path; -import java.util.Collections; +import java.util.Arrays; +import java.util.List; import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.equalTo; @@ -63,25 +64,25 @@ public class TribeUnitTests extends 
ESTestCase { Settings baseSettings = Settings.builder() .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .build(); + final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class); tribe1 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe1") .put("node.name", "tribe1_node") .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) - .build(), Collections.singleton(MockTcpTransportPlugin.class)).start(); + .build(), mockPlugins).start(); tribe2 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") .put("node.name", "tribe2_node") .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) - .build(), Collections.singleton(MockTcpTransportPlugin.class)).start(); + .build(), mockPlugins).start(); } @AfterClass @@ -106,11 +107,10 @@ public class TribeUnitTests extends ESTestCase { .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).put("discovery.type", "local") .put("tribe.t1.transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put("tribe.t2.transport.type",MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) - .put("tribe.t1.discovery.type", "local").put("tribe.t2.discovery.type", "local") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(extraSettings).build(); - try (Node node = new MockNode(settings, Collections.singleton(MockTcpTransportPlugin.class)).start()) { + try (Node node = new MockNode(settings, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) { try (Client client = node.client()) { assertBusy(() -> { ClusterState state = client.admin().cluster().prepareState().clear().setNodes(true).get().getState();
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml index 8a80dec1c09..50d85238caa 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml @@ -9,7 +9,6 @@ "processors": [ { "script" : { - "lang" : "painless", "inline": "ctx.bytes_total = (ctx.bytes_in + ctx.bytes_out) * params.factor", "params": { "factor": 10 @@ -48,7 +47,6 @@ "processors": [ { "script" : { - "lang" : "painless", "file": "master" } } @@ -94,7 +92,6 @@ "processors": [ { "script" : { - "lang" : "painless", "id" : "sum_bytes" } }
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index fb7ee1d8d3c..bc27de7f624 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -37,7 +37,6 @@ List availableBoxes = [ 'sles-12', 'ubuntu-1204', 'ubuntu-1404', - 'ubuntu-1504', 'ubuntu-1604' ]
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json new file mode 100644 index 00000000000..31c9d51f673 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -0,0 +1,75 @@ +{ + "create": {
"documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", + "methods": ["PUT","POST"], + "url": { + "path": "/{index}/{type}/{id}/_create", + "paths": ["/{index}/{type}/{id}/_create"], + "parts": { + "id": { + "type" : "string", + "required" : true, + "description" : "Document ID" + }, + "index": { + "type" : "string", + "required" : true, + "description" : "The name of the index" + }, + "type": { + "type" : "string", + "required" : true, + "description" : "The type of the document" + } + }, + "params": { + "wait_for_active_shards": { + "type" : "string", + "description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" + }, + "parent": { + "type" : "string", + "description" : "ID of the parent document" + }, + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." + }, + "routing": { + "type" : "string", + "description" : "Specific routing value" + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + }, + "timestamp": { + "type" : "time", + "description" : "Explicit timestamp for the document" + }, + "ttl": { + "type" : "time", + "description" : "Expiration time for the document" + }, + "version" : { + "type" : "number", + "description" : "Explicit version number for concurrency control" + }, + "version_type": { + "type" : "enum", + "options" : ["internal", "external", "external_gte", "force"], + "description" : "Specific version type" + }, + "pipeline" : { + "type" : "string", + "description" : "The pipeline id to preprocess incoming documents with" + } + } + }, + "body": { + "description" : "The document", + "required" : true + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java index cf565499a8d..ea924734765 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java +++ b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.transport.MockTcpTransportPlugin; import java.io.IOException; import java.nio.file.Path; @@ -46,13 +47,12 @@ public class NodeTests extends ESTestCase { .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("discovery.type", "local") - .put("transport.type", "local") + .put("transport.type", "mock-socket-network") .put(Node.NODE_DATA_SETTING.getKey(), true); if (name != null) { settings.put(Node.NODE_NAME_SETTING.getKey(), name); } - try (Node node = new MockNode(settings.build(), Collections.emptyList())) { + try (Node node = new MockNode(settings.build(), Collections.singleton(MockTcpTransportPlugin.class))) { final Settings nodeSettings = 
randomBoolean() ? node.settings() : node.getEnvironment().settings(); if (name == null) { assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(node.getNodeEnvironment().nodeId().substring(0, 7))); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java index 16647b04a47..0ece6fad393 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java @@ -29,8 +29,6 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.listeners.LoggingListener; @@ -206,6 +204,11 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { return finalSettings.build(); } + @Override + protected boolean addMockZenPings() { + return false; + } + protected int minExternalNodes() { return 1; } protected int maxExternalNodes() { @@ -243,7 +246,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { protected Settings commonNodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(requiredSettings()); builder.put(NetworkModule.TRANSPORT_TYPE_KEY, randomBoolean() ? "netty3" : "netty4"); // run same transport / disco as external - builder.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen"); return builder.build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 26059ba60ca..1f2f01cc352 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -28,15 +28,8 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.transport.AssertingTransportInterceptor; -import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; @@ -65,6 +58,7 @@ import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -74,6 +68,7 @@ import 
org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -99,9 +94,10 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.ElectMasterService; +import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexService; @@ -119,12 +115,16 @@ import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.client.RandomizingClient; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.AssertingTransportInterceptor; +import org.elasticsearch.transport.MockTcpTransportPlugin; import org.hamcrest.Matchers; import org.junit.After; import org.junit.AfterClass; @@ -1750,7 +1750,6 @@ public abstract class ESIntegTestCase extends ESTestCase { public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .put(networkSettings.build()). put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } @@ -1787,6 +1786,10 @@ public abstract class ESIntegTestCase extends ESTestCase { return true; } + protected boolean addMockZenPings() { + return true; + } + /** * Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful * for debugging or request / response pre and post processing. 
It also allows to intercept all calls done by the test @@ -1823,6 +1826,10 @@ public abstract class ESIntegTestCase extends ESTestCase { if (addMockTransportService()) { mocks.add(MockTcpTransportPlugin.class); } + + if (addMockZenPings()) { + mocks.add(MockZenPing.TestPlugin.class); + } mocks.add(TestSeedPlugin.class); return Collections.unmodifiableList(mocks); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 2752e0f2eca..f096e662f4a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -45,6 +45,7 @@ import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.junit.After; @@ -181,7 +182,6 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("discovery.type", "local") .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(Node.NODE_DATA_SETTING.getKey(), true) .put(nodeSettings()) // allow test cases to provide their own settings or override these @@ -191,6 +191,10 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { plugins = new ArrayList<>(plugins); plugins.add(MockTcpTransportPlugin.class); } + if (plugins.contains(MockZenPing.TestPlugin.class) == false) { + plugins = new ArrayList<>(plugins); + plugins.add(MockZenPing.TestPlugin.class); + } Node build = new MockNode(settings, plugins); try { build.start(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index b393498ec89..3fd2b024a1d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; @@ -39,7 +38,7 @@ import java.util.Set; public class ClusterDiscoveryConfiguration extends NodeConfigurationSource { - static Settings DEFAULT_NODE_SETTINGS = Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); + static Settings DEFAULT_NODE_SETTINGS = Settings.EMPTY; private static final String IP_ADDR = "127.0.0.1"; final int numOfNodes; diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java new file mode 100644 index 00000000000..d1e38e061bf 
--- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.discovery; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.discovery.zen.ping.PingContextProvider; +import org.elasticsearch.discovery.zen.ping.ZenPing; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.Plugin; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * A {@link ZenPing} implementation which returns results based on a static in-memory map. This allows pinging + * to be immediate and can be used to speed up tests.
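 + * <p>
 + * Illustrative usage sketch (assumes the test wiring introduced elsewhere in this patch, e.g. in TribeUnitTests):
 + * <pre>
 + * Node node = new MockNode(settings, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class));
 + * </pre>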
+ */ +public final class MockZenPing extends AbstractLifecycleComponent implements ZenPing { + + static final Map<ClusterName, Set<MockZenPing>> activeNodesPerCluster = ConcurrentCollections.newConcurrentMap(); + + private volatile PingContextProvider contextProvider; + + @Inject + public MockZenPing(Settings settings) { + super(settings); + } + + @Override + public void setPingContextProvider(PingContextProvider contextProvider) { + this.contextProvider = contextProvider; + } + + @Override + public void ping(PingListener listener, TimeValue timeout) { + logger.info("pinging using mock zen ping"); + List<PingResponse> responseList = getActiveNodesForCurrentCluster().stream() + .filter(p -> p != this) // remove this as pings are not expected to return the local node + .map(MockZenPing::getPingResponse) + .collect(Collectors.toList()); + listener.onPing(responseList); + } + + private ClusterName getClusterName() { + return contextProvider.clusterState().getClusterName(); + } + + private PingResponse getPingResponse() { + final ClusterState clusterState = contextProvider.clusterState(); + return new PingResponse(clusterState.nodes().getLocalNode(), clusterState.nodes().getMasterNode(), clusterState); + } + + @Override + protected void doStart() { + assert contextProvider != null; + boolean added = getActiveNodesForCurrentCluster().add(this); + assert added; + } + + private Set<MockZenPing> getActiveNodesForCurrentCluster() { + return activeNodesPerCluster.computeIfAbsent(getClusterName(), + clusterName -> ConcurrentCollections.newConcurrentSet()); + } + + @Override + protected void doStop() { + boolean found = getActiveNodesForCurrentCluster().remove(this); + assert found; + } + + @Override + protected void doClose() { + + } + + public static class TestPlugin extends Plugin implements DiscoveryPlugin { + + public void onModule(DiscoveryModule discoveryModule) { + discoveryModule.addZenPing(MockZenPing.class); + } + } +}
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index e5b704b2f2c..7c001f910d7 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -27,13 +27,13 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; -import org.elasticsearch.transport.MockTcpTransport; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.TransportSettings; @@ -141,7 +141,6 @@ public class InternalTestClusterTests extends ESTestCase { NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)) .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); } @@ -156,12 +155,13 @@ public class InternalTestClusterTests extends ESTestCase { String nodePrefix = "foobar"; Path baseDir = createTempDir(); + final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class); InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.singleton(MockTcpTransportPlugin.class), Function.identity()); + enableHttpPipelining, nodePrefix, mockPlugins, Function.identity()); InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.singleton(MockTcpTransportPlugin.class), Function.identity()); + enableHttpPipelining, nodePrefix, mockPlugins, Function.identity()); assertClusters(cluster0, cluster1, false); long seed = randomLong(); @@ -205,7 +205,6 @@ public class InternalTestClusterTests extends ESTestCase { NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes) .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .build(); } @Override @@ -219,7 +218,8 @@ public class InternalTestClusterTests extends ESTestCase { Path baseDir = createTempDir(); InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.singleton(MockTcpTransportPlugin.class), Function.identity()); + enableHttpPipelining, nodePrefix, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class), + Function.identity()); try { cluster.beforeTest(random(), 0.0); final Map shardNodePaths = new HashMap<>(); @@ -288,7 +288,6 @@ public class InternalTestClusterTests extends ESTestCase { .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes) .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0).build(); } @@ -297,7 +296,7 @@ public class InternalTestClusterTests extends ESTestCase { return Settings.builder() .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); } - }, 0, randomBoolean(), "", Collections.singleton(MockTcpTransportPlugin.class), Function.identity()); + }, 0, randomBoolean(), "", Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class), Function.identity()); cluster.beforeTest(random(), 0.0); try { Map<DiscoveryNode.Role, Set<Path>> pathsPerRole = new HashMap<>();
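// Illustrative note, not part of this patch: the "discovery.type": "local" settings removed throughout these
// tests are no longer needed because MockZenPing.TestPlugin is registered instead. Pings then resolve against
// the static in-memory map keyed by cluster name, roughly:
//
//     Set<MockZenPing> active = activeNodesPerCluster.get(clusterName);
//     // every node registered under the same cluster name answers immediately,
//     // so Zen discovery elects a master without any real network round trip.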