Add multi-node IT infrastructure

This adds the infrastructure to run integration tests with more than one node.
 * it adds the relevant macros and targets to integration-tests.xml to start unicast nodes
 * there is a qa/smoke-test-multinode project that simulates such a setup

This commit is solely the infrastructure and doesn't hook up any projects to use it.
For reliability and stability reasons, multi-node tests should be used with care and
only if they are really needed.
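
As a rough sketch, a project that opts in could combine the new macros like this
(the target name and the secondary pidfile are made up for illustration; they are
not part of this commit):

    <target name="start-two-unicast-nodes" depends="setup-workspace">
        <!-- node 1: default ports, told where to find node 2's transport port -->
        <start-unicast-node es.peer.list="127.0.0.1:${integ.transport.port.sec}"/>
        <!-- node 2: secondary ports and its own pidfile, pointing back at node 1 -->
        <start-unicast-node es.http.port="${integ.http.port.sec}"
                            es.transport.port="${integ.transport.port.sec}"
                            es.pidfile="${integ.scratch}/es-node2.pid"
                            es.peer.list="127.0.0.1:${integ.transport.port}"/>
        <!-- fail the build if the two nodes never find each other -->
        <local name="failed.to.form.cluster"/>
        <waitfor-two-nodes port="${integ.http.port}" timeoutproperty="failed.to.form.cluster"/>
        <fail message="nodes did not form a cluster" if="failed.to.form.cluster"/>
    </target>

The qa/smoke-test-multinode project below does essentially this, wrapping the second
node and the wait in ant-contrib trycatch blocks so that any node that did start is
shut down again on failure.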

Closes #12718
Simon Willnauer 2015-08-11 00:03:15 +02:00
parent 03e15056c7
commit d1bc099c91
6 changed files with 470 additions and 17 deletions

@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <project name="elasticsearch-integration-tests">
   <!-- our pid file for easy cleanup -->
   <property name="integ.pidfile" location="${integ.scratch}/es.pid"/>
@@ -124,11 +123,27 @@
     </sequential>
   </macrodef>
+
+  <!-- waits for cluster to form and have exactly two nodes -->
+  <macrodef name="waitfor-two-nodes">
+    <attribute name="port"/>
+    <attribute name="timeoutproperty"/>
+    <sequential>
+      <echo>Waiting for elasticsearch to form a cluster of two...</echo>
+      <waitfor maxwait="30" maxwaitunit="second"
+               checkevery="500" checkeveryunit="millisecond"
+               timeoutproperty="@{timeoutproperty}">
+        <http url="http://127.0.0.1:@{port}/_cluster/health?wait_for_nodes=2"/>
+      </waitfor>
+    </sequential>
+  </macrodef>
+
   <!-- start elasticsearch and wait until its ready -->
   <macrodef name="startup-elasticsearch">
     <attribute name="home" default="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
     <attribute name="spawn" default="true"/>
     <attribute name="args" default="${integ.args}"/>
+    <attribute name="es.unicast.enabled" default="false"/>
+    <attribute name="es.unicast.hosts" default=""/>
     <attribute name="es.cluster.name" default="${integ.cluster.name}"/>
     <attribute name="es.http.port" default="${integ.http.port}"/>
     <attribute name="es.transport.tcp.port" default="${integ.transport.port}"/>
@@ -146,6 +161,8 @@
       -Des.pidfile=@{es.pidfile}
       -Des.path.repo=@{home}/repo
       -Des.discovery.zen.ping.multicast.enabled=false
+      -Des.discovery.zen.ping.unicast.enabled=@{es.unicast.enabled}
+      -Des.discovery.zen.ping.unicast.hosts=@{es.unicast.hosts}
       -Des.script.inline=on
       -Des.script.indexed=on
       -Des.repositories.url.allowed_urls=http://snapshot.test*
@@ -183,7 +200,7 @@
       <local name="integ.pid"/>
       <extract-pid file="@{es.pidfile}" property="integ.pid"/>
-      <echo>External cluster started PID ${integ.pid}</echo>
+      <echo>External node started PID ${integ.pid}</echo>
     </sequential>
   </macrodef>
@@ -205,6 +222,40 @@
     </sequential>
   </macrodef>
+
+  <macrodef name="stop-node">
+    <attribute name="es.pidfile" default="${integ.pidfile}"/>
+    <sequential>
+      <local name="integ.pid"/>
+      <extract-pid file="@{es.pidfile}" property="integ.pid"/>
+      <echo>Shutting down external node PID ${integ.pid}</echo>
+      <exec executable="taskkill" failonerror="true" osfamily="winnt">
+        <arg value="/F"/>
+        <arg value="/PID"/>
+        <arg value="${integ.pid}"/>
+      </exec>
+      <exec executable="kill" failonerror="true" osfamily="unix">
+        <arg value="-9"/>
+        <arg value="${integ.pid}"/>
+      </exec>
+      <delete file="@{es.pidfile}"/>
+    </sequential>
+  </macrodef>
+
+  <!-- starts a unicast node on an already setup workspace-->
+  <macrodef name="start-unicast-node">
+    <attribute name="es.http.port" default="${integ.http.port}"/>
+    <attribute name="es.transport.port" default="${integ.transport.port}"/>
+    <attribute name="es.pidfile" default="${integ.pidfile}"/>
+    <attribute name="es.peer.list" />
+    <sequential>
+      <startup-elasticsearch es.pidfile="@{es.pidfile}" es.unicast.enabled="true"
+                             es.transport.tcp.port="@{es.transport.port}" es.http.port="@{es.http.port}"
+                             es.unicast.hosts="@{es.peer.list}"/>
+    </sequential>
+  </macrodef>
+
   <!-- unzip the elasticsearch zip -->
   <target name="setup-workspace" depends="stop-external-cluster">
     <sequential>
@@ -233,21 +284,7 @@
   <!-- TODO, for some more safety, add back some of the old jps logic
        and verify the pid is really an ES process! (fail otherwise) -->
   <target name="stop-external-cluster" if="integ.pidfile.exists">
-    <local name="integ.pid"/>
-    <extract-pid file="${integ.pidfile}" property="integ.pid"/>
-    <echo>Shutting down external cluster PID ${integ.pid}</echo>
-    <exec executable="taskkill" failonerror="true" osfamily="winnt">
-      <arg value="/F"/>
-      <arg value="/PID"/>
-      <arg value="${integ.pid}"/>
-    </exec>
-    <exec executable="kill" failonerror="true" osfamily="unix">
-      <arg value="-9"/>
-      <arg value="${integ.pid}"/>
-    </exec>
-    <delete file="${integ.pidfile}"/>
+    <stop-node/>
   </target>
 
   <!-- distribution tests: .zip -->

@@ -115,6 +115,8 @@
     <integ.temp>${integ.scratch}/temp</integ.temp>
     <integ.http.port>9400</integ.http.port>
     <integ.transport.port>9500</integ.transport.port>
+    <integ.http.port.sec>9600</integ.http.port.sec>
+    <integ.transport.port.sec>9700</integ.transport.port.sec>
     <no.commit.pattern>\bno(n|)commit\b</no.commit.pattern>
   </properties>

@@ -0,0 +1,42 @@
<?xml version="1.0"?>
<project name="smoke-test-multinode"
         xmlns:ac="antlib:net.sf.antcontrib">

  <import file="${elasticsearch.integ.antfile.default}"/>

  <property name="integ.pidfile.sec" location="${integ.scratch}/es-secondary.pid"/>
  <available property="integ.pidfile.sec.exists" file="${integ.pidfile.sec}"/>

  <target name="stop-secondary-node" if="integ.pidfile.sec.exists">
    <stop-node es.pidfile="${integ.pidfile.sec}"/>
  </target>

  <target name="stop-primary-node" if="integ.pidfile.exists">
    <stop-node es.pidfile="${integ.pidfile}"/>
  </target>

  <target name="start-external-multi-node-no-plugins" depends="stop-secondary-node, setup-workspace" unless="${shouldskip}">
    <start-unicast-node es.peer.list="127.0.0.1:9700"/>
    <ac:trycatch property="failure.message">
      <ac:try>
        <start-unicast-node es.http.port="9600" es.transport.port="9700"
                            es.pidfile="${integ.pidfile.sec}"
                            es.peer.list="127.0.0.1:${integ.transport.port}"/>
      </ac:try>
      <ac:catch>
        <echo>Failed to start second node with message: ${failure.message}</echo>
        <stop-node es.pidfile="${integ.pidfile}"/>
      </ac:catch>
    </ac:trycatch>
    <ac:trycatch>
      <ac:try>
        <local name="failed.to.form.cluster"/>
        <waitfor-two-nodes port="${integ.http.port}"
                           timeoutproperty="failed.to.form.cluster"/>
        <fail message="Instances did not form a cluster" if="failed.to.form.cluster"/>
      </ac:try>
      <ac:catch>
        <stop-node es.pidfile="${integ.pidfile}"/>
        <stop-node es.pidfile="${integ.pidfile.sec}"/>
      </ac:catch>
    </ac:trycatch>
  </target>
</project>

@@ -0,0 +1,305 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.elasticsearch.qa</groupId>
<artifactId>elasticsearch-qa</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
</parent>
<!--
This test unzips the elasticsearch distribution, starts two
elasticsearch nodes, and verifies that they form a cluster.
-->
<artifactId>smoke-test-multinode</artifactId>
<name>QA: Smoke Test Multi-Node IT</name>
<description>Tests that multi node IT tests work</description>
<properties>
<skip.unit.tests>true</skip.unit.tests>
<elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
<tests.rest.suite>smoke_test_multinode</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>
<dependencies>
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<!-- Provided dependencies by elasticsearch itself -->
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-core</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-backward-codecs</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analyzers-common</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-queries</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-memory</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-highlighter</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-queryparser</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-suggest</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-join</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-spatial</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-expressions</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.spatial4j</groupId>
<artifactId>spatial4j</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.vividsolutions</groupId>
<artifactId>jts</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.github.spullara.mustache.java</groupId>
<artifactId>compiler</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.carrotsearch</groupId>
<artifactId>hppc</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.joda</groupId>
<artifactId>joda-convert</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-smile</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-yaml</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-cbor</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.ning</groupId>
<artifactId>compress-lzf</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.tdunning</groupId>
<artifactId>t-digest</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-all</artifactId>
<classifier>indy</classifier>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>apache-log4j-extras</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>net.java.dev.jna</groupId>
<artifactId>jna</artifactId>
<scope>provided</scope>
</dependency>
<!-- Required by the REST test framework -->
<!-- TODO: remove this dependency when we will have a REST Test module -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>integ-setup-dependencies</id>
<phase>pre-integration-test</phase>
<goals>
<goal>copy</goal>
</goals>
<configuration>
<skip>${skip.integ.tests}</skip>
<useBaseVersion>true</useBaseVersion>
<outputDirectory>${integ.deps}/plugins</outputDirectory>
<artifactItems>
<!-- elasticsearch distribution -->
<artifactItem>
<groupId>org.elasticsearch.distribution.zip</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
<outputDirectory>${integ.deps}</outputDirectory>
</artifactItem>
</artifactItems>
</configuration>
</execution>
</executions>
</plugin>
<!-- integration tests -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<!-- start up external cluster -->
<execution>
<id>integ-setup</id>
<phase>pre-integration-test</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<ant antfile="${elasticsearch.integ.antfile}" target="start-external-multi-node-no-plugins">
<property name="tests.jvm.argline" value="${tests.jvm.argline}"/>
<property name="integ.multi.node" value="true"/>
</ant>
</target>
<skip>${skip.integ.tests}</skip>
</configuration>
</execution>
<!-- shut down external cluster -->
<execution>
<id>integ-teardown</id>
<phase>post-integration-test</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<ant antfile="${elasticsearch.integ.antfile}" target="stop-external-cluster"/>
</target>
<skip>${skip.integ.tests}</skip>
</configuration>
</execution>
</executions>
<dependencies>
<dependency>
<groupId>ant-contrib</groupId>
<artifactId>ant-contrib</artifactId>
<version>1.0b3</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant-nodeps</artifactId>
<version>1.8.1</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</build>
</project>

@@ -0,0 +1,26 @@
# Integration tests for smoke testing multi-node IT
#
---
"cluster health basic test, one index":
  - do:
      indices.create:
        index: test_index
        body:
          settings:
            index:
              number_of_replicas: 1

  - do:
      cluster.health:
        wait_for_status: green

  - is_true:  cluster_name
  - is_false: timed_out
  - gte:      { number_of_nodes: 2 }
  - gte:      { number_of_data_nodes: 2 }
  - gt:       { active_primary_shards: 0 }
  - gt:       { active_shards: 0 }
  - gte:      { relocating_shards: 0 }
  - match:    { initializing_shards: 0 }
  - match:    { unassigned_shards: 0 }
  - gte:      { number_of_pending_tasks: 0 }

@@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.smoketest;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;

import java.io.IOException;

public class SmokeTestMultiIT extends ESRestTestCase {

    public SmokeTestMultiIT(@Name("yaml") RestTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
        return ESRestTestCase.createParameters(0, 1);
    }
}