test: added smoke test for the shield tribe node integration

Original commit: elastic/x-pack-elasticsearch@f7ab8b9044
Martijn van Groningen 2015-09-02 23:16:07 +02:00
parent 547b6346f6
commit e7b338a077
10 changed files with 926 additions and 230 deletions


@@ -302,6 +302,7 @@
<systemProperties>
<!-- use external cluster -->
<tests.cluster>127.0.0.1:${integ.transport.port}</tests.cluster>
<tests.rest.cluster>127.0.0.1:${integ.http.port}</tests.rest.cluster>
</systemProperties>
</configuration>
</execution>
@@ -314,10 +315,12 @@
<modules>
<module>smoke-test-plugins</module>
<!-- Disabled until 'openssl' is available on the Windows build machines
See: https://github.com/elastic/infra/issues/331
<module>smoke-test-plugins-ssl</module>-->
<module>shield-core-rest-tests</module>
<module>smoke-test-watcher-with-shield</module>
<module>shield-example-realm</module>
<module>shield-tribe-node-tests</module>
</modules>
<profiles>


@@ -0,0 +1,166 @@
<?xml version="1.0"?>
<!--
~ ELASTICSEARCH CONFIDENTIAL
~ __________________
~
~ [2014] Elasticsearch Incorporated. All Rights Reserved.
~
~ NOTICE: All information contained herein is, and remains
~ the property of Elasticsearch Incorporated and its suppliers,
~ if any. The intellectual and technical concepts contained
~ herein are proprietary to Elasticsearch Incorporated
~ and its suppliers and may be covered by U.S. and Foreign Patents,
~ patents in process, and are protected by trade secret or copyright law.
~ Dissemination of this information or reproduction of this material
~ is strictly forbidden unless prior written permission is obtained
~ from Elasticsearch Incorporated.
-->
<project name="smoke-test-tribe-node-with-shield"
xmlns:ac="antlib:net.sf.antcontrib">
<taskdef name="xhttp" classname="org.elasticsearch.ant.HttpTask" classpath="${test_classpath}" />
<typedef name="xhttp" classname="org.elasticsearch.ant.HttpCondition" classpath="${test_classpath}"/>
<import file="${elasticsearch.integ.antfile.default}"/>
<import file="${elasticsearch.tools.directory}/ant/shield-overrides.xml"/>
<property name="integ.pidfile.2" location="${integ.scratch}/cluster2.pid"/>
<available property="integ.pidfile.2.exists" file="${integ.pidfile.2}"/>
<property name="integ.pidfile.3" location="${integ.scratch}/cluster3.pid"/>
<available property="integ.pidfile.3.exists" file="${integ.pidfile.3}"/>
<macrodef name="create-index">
<attribute name="name" />
<attribute name="port" />
<sequential>
<local name="index.create.timedout"/>
<xhttp uri="http://127.0.0.1:@{port}/@{name}" method="PUT" username="test_admin" password="changeme" />
<waitfor maxwait="30" maxwaitunit="second"
checkevery="500" checkeveryunit="millisecond"
timeoutproperty="index.create.timedout">
<xhttp uri="http://127.0.0.1:@{port}/_cluster/health/@{name}?wait_for_status=yellow" username="test_admin" password="changeme" />
</waitfor>
<fail message="Index @{name} did not reach yellow status within 30 seconds" if="index.create.timedout"/>
</sequential>
</macrodef>
<target name="start-tribe-node-and-2-clusters-with-shield" depends="setup-workspace">
<ac:for list="${xplugins.list}" param="xplugin.name">
<sequential>
<fail message="Expected @{xplugin.name}-${version}.zip as a dependency, but could not be found in ${integ.deps}/plugins}">
<condition>
<not>
<available file="${integ.deps}/plugins/@{xplugin.name}-${elasticsearch.version}.zip"/>
</not>
</condition>
</fail>
</sequential>
</ac:for>
<ac:for param="file">
<path>
<fileset dir="${integ.deps}/plugins"/>
</path>
<sequential>
<local name="plugin.name"/>
<convert-plugin-name file="@{file}" outputproperty="plugin.name"/>
<install-plugin name="${plugin.name}" file="@{file}"/>
</sequential>
</ac:for>
<local name="home"/>
<property name="home" location="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
<echo>Adding roles.yml</echo>
<copy file="shield-roles.yml" tofile="${home}/config/shield/roles.yml" overwrite="true"/>
<echo>Adding shield users...</echo>
<run-script script="${home}/bin/shield/esusers">
<nested>
<arg value="useradd"/>
<arg value="test_admin"/>
<arg value="-p"/>
<arg value="changeme"/>
<arg value="-r"/>
<arg value="admin"/>
</nested>
</run-script>
<echo>Starting two nodes, each node in a different cluster</echo>
<ac:trycatch property="failure.message">
<ac:try>
<startup-elasticsearch es.transport.tcp.port="9600"
es.http.port="9700"
es.pidfile="${integ.pidfile.2}"
es.unicast.hosts="127.0.0.1:9600"
es.cluster.name="cluster1"/>
</ac:try>
<ac:catch>
<echo>Failed to start first cluster with message: ${failure.message}</echo>
<stop-node es.pidfile="${integ.pidfile.2}"/>
</ac:catch>
</ac:trycatch>
<ac:trycatch property="failure.message">
<ac:try>
<startup-elasticsearch es.transport.tcp.port="9800"
es.http.port="9900"
es.pidfile="${integ.pidfile.3}"
es.unicast.hosts="127.0.0.1:9800"
es.cluster.name="cluster2"/>
</ac:try>
<ac:catch>
<echo>Failed to start second cluster with message: ${failure.message}</echo>
<stop-node es.pidfile="${integ.pidfile.3}"/>
<stop-node es.pidfile="${integ.pidfile.2}"/>
</ac:catch>
</ac:trycatch>
<ac:trycatch property="failure.message">
<ac:try>
<echo>Starting a tribe node, configured to connect to cluster1 and cluster2</echo>
<startup-elasticsearch>
<additional-args>
<arg value="-Des.tribe.cluster1.cluster.name=cluster1"/>
<arg value="-Des.tribe.cluster1.discovery.zen.ping.unicast.hosts=127.0.0.1:9600"/>
<arg value="-Des.tribe.cluster2.cluster.name=cluster2"/>
<arg value="-Des.tribe.cluster2.discovery.zen.ping.unicast.hosts=127.0.0.1:9800"/>
</additional-args>
</startup-elasticsearch>
<xhttp uri="http://127.0.0.1:${integ.http.port}/_cluster/health?wait_for_nodes=5" username="test_admin" password="changeme" />
<!--
From the REST tests we only connect to the tribe node, so we need to create the indices externally.
By creating each index after the tribe node has started we can be sure that the tribe node knows
about it. See: https://github.com/elastic/elasticsearch/issues/13292
-->
<echo>Creating index1 in cluster1</echo>
<create-index name="index1" port="9700"/>
<echo>Creating index2 in cluster2</echo>
<create-index name="index2" port="9900"/>
</ac:try>
<ac:catch>
<echo>Failed to start tribe node with message: ${failure.message}</echo>
<stop-node es.pidfile="${integ.pidfile}"/>
<stop-node es.pidfile="${integ.pidfile.3}"/>
<stop-node es.pidfile="${integ.pidfile.2}"/>
</ac:catch>
</ac:trycatch>
</target>
<target name="stop-tribe-node" if="integ.pidfile.exists">
<stop-node es.pidfile="${integ.pidfile}"/>
</target>
<target name="stop-cluster1" if="integ.pidfile.2.exists">
<stop-node es.pidfile="${integ.pidfile.2}"/>
</target>
<target name="stop-cluster2" if="integ.pidfile.2.exists">
<stop-node es.pidfile="${integ.pidfile.3}"/>
</target>
<target name="stop-tribe-node-and-all-clusters" depends="stop-tribe-node,stop-cluster1,stop-cluster2"/>
</project>
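
For reference, a minimal Java sketch (editor's addition, not part of the commit) of what the create-index macro above does: one PUT to create the index, then polling cluster health until the index reaches yellow within the macro's 30 second budget. Host, port, index name and credentials are taken from the targets above; only JDK classes are used.

import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class CreateIndexSketch {
    public static void main(String[] args) throws Exception {
        // same credentials the esusers call above sets up
        String auth = "Basic " + Base64.getEncoder()
                .encodeToString("test_admin:changeme".getBytes(StandardCharsets.UTF_8));
        // PUT http://127.0.0.1:9700/index1 creates the index in cluster1
        send("http://127.0.0.1:9700/index1", "PUT", auth);
        // poll the health endpoint until yellow, mirroring the <waitfor> in the macro
        long deadline = System.currentTimeMillis() + 30_000;
        String health = "http://127.0.0.1:9700/_cluster/health/index1?wait_for_status=yellow";
        while (send(health, "GET", auth) != 200) {
            if (System.currentTimeMillis() > deadline) {
                throw new IllegalStateException("index1 did not reach yellow within 30s");
            }
            Thread.sleep(500);
        }
    }

    private static int send(String url, String method, String auth) throws Exception {
        HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
        connection.setRequestMethod(method);
        connection.setRequestProperty("Authorization", auth);
        int code = connection.getResponseCode();
        connection.disconnect();
        return code;
    }
}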


@@ -0,0 +1,202 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ ELASTICSEARCH CONFIDENTIAL
~ __________________
~
~ [2014] Elasticsearch Incorporated. All Rights Reserved.
~
~ NOTICE: All information contained herein is, and remains
~ the property of Elasticsearch Incorporated and its suppliers,
~ if any. The intellectual and technical concepts contained
~ herein are proprietary to Elasticsearch Incorporated
~ and its suppliers and may be covered by U.S. and Foreign Patents,
~ patents in process, and are protected by trade secret or copyright law.
~ Dissemination of this information or reproduction of this material
~ is strictly forbidden unless prior written permission is obtained
~ from Elasticsearch Incorporated.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.elasticsearch.qa</groupId>
<artifactId>x-plugins-qa</artifactId>
<version>3.0.0-SNAPSHOT</version>
</parent>
<artifactId>smoke-test-tribe-node-with-shield</artifactId>
<name>QA: Smoke Test tribe node with Shield</name>
<description>Starts a tribe node and two other nodes, each with a different cluster name, and verifies that all data is accessible via the tribe node</description>
<properties>
<skip.unit.tests>true</skip.unit.tests>
<elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
<xplugins.list>license,shield</xplugins.list>
</properties>
<dependencies>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>license</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<testResources>
<!-- REST API specifications copied from main Elasticsearch specs
because they are required to execute the tribe node REST tests -->
<testResource>
<directory>${elasticsearch.tools.directory}/rest-api-spec</directory>
<targetPath>rest-api-spec</targetPath>
<includes>
<!-- required by the test framework -->
<include>api/info.json</include>
<include>api/cluster.health.json</include>
<include>api/cluster.state.json</include>
<!-- used by the tribe node REST tests -->
<include>api/index.json</include>
<include>api/get.json</include>
<include>api/delete.json</include>
<include>api/delete-by-query.json</include>
<include>api/bulk.json</include>
<include>api/update.json</include>
<include>api/search.json</include>
<include>api/indices.delete.json</include>
<include>api/indices.refresh.json</include>
</includes>
</testResource>
<testResource>
<directory>${basedir}/rest-api-spec</directory>
<filtering>true</filtering>
<targetPath>rest-api-spec</targetPath>
<includes>
<include>api/*.json</include>
<include>test/**/*.yaml</include>
</includes>
</testResource>
</testResources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>integ-setup-dependencies</id>
<phase>pre-integration-test</phase>
<goals>
<goal>copy</goal>
</goals>
<configuration>
<skip>${skip.integ.tests}</skip>
<useBaseVersion>true</useBaseVersion>
<outputDirectory>${integ.deps}/plugins</outputDirectory>
<artifactItems>
<!-- elasticsearch distribution -->
<artifactItem>
<groupId>org.elasticsearch.distribution.zip</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
<outputDirectory>${integ.deps}</outputDirectory>
</artifactItem>
<!-- commercial plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>license</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
</artifactItem>
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
</artifactItem>
</artifactItems>
</configuration>
</execution>
</executions>
</plugin>
<!-- integration tests -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<!-- start up external cluster -->
<execution>
<id>integ-setup</id>
<phase>pre-integration-test</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<property name="test_classpath" refid="maven.test.classpath"/>
<ant antfile="${elasticsearch.integ.antfile}" target="start-tribe-node-and-2-clusters-with-shield">
<property name="tests.jvm.argline" value="${tests.jvm.argline}"/>
<property name="plugins.dir" value="${plugins.dir}"/>
<property name="xplugins.list" value="${xplugins.list}"/>
</ant>
</target>
<skip>${skip.integ.tests}</skip>
</configuration>
</execution>
<!-- shut down external cluster -->
<execution>
<id>integ-teardown</id>
<phase>post-integration-test</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<property name="test_classpath" refid="maven.test.classpath"/>
<ant antfile="${elasticsearch.integ.antfile}" target="stop-tribe-node-and-all-clusters"/>
</target>
<skip>${skip.integ.tests}</skip>
</configuration>
</execution>
</executions>
<dependencies>
<dependency>
<groupId>ant-contrib</groupId>
<artifactId>ant-contrib</artifactId>
<version>1.0b3</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant-nodeps</artifactId>
<version>1.8.1</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</build>
</project>


@@ -0,0 +1,27 @@
---
"Tribe node search":
- do:
index:
index: index1
type: test
id: 1
body: { foo: bar }
- do:
index:
index: index2
type: test
id: 1
body: { foo: bar }
- do:
indices.refresh: {}
- do:
search:
index: index1,index2
body:
query: { term: { foo: bar }}
- match: { hits.total: 2 }


@@ -0,0 +1,4 @@
admin:
cluster: all
indices:
'*': all


@@ -0,0 +1,25 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.ant;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.taskdefs.condition.Condition;
public class HttpCondition extends HttpTask implements Condition {
private int expectedResponseCode = 200;
@Override
public boolean eval() throws BuildException {
int responseCode = executeHttpRequest();
getProject().log("response code=" + responseCode);
return responseCode == expectedResponseCode;
}
public void setExpectedResponseCode(int expectedResponseCode) {
this.expectedResponseCode = expectedResponseCode;
}
}


@@ -0,0 +1,82 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.ant;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.Task;
import org.elasticsearch.common.Base64;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.nio.charset.StandardCharsets;
public class HttpTask extends Task {
private String uri;
private String method;
private String body;
private String username;
private String password;
@Override
public void execute() throws BuildException {
int responseCode = executeHttpRequest();
getProject().log("response code=" + responseCode);
}
protected int executeHttpRequest() {
try {
URI uri = new URI(this.uri);
URL url = uri.toURL();
getProject().log("url=" + url);
HttpURLConnection urlConnection = (HttpURLConnection) url.openConnection();
if (method != null) {
urlConnection.setRequestMethod(method);
}
if (username != null) {
String basicAuth = "Basic " + Base64.encodeBytes((username + ":" + password).getBytes(StandardCharsets.UTF_8));
urlConnection.setRequestProperty("Authorization", basicAuth);
}
if (body != null) {
urlConnection.setDoOutput(true);
urlConnection.setRequestProperty("Accept-Charset", StandardCharsets.UTF_8.name());
byte[] bytes = body.getBytes(StandardCharsets.UTF_8);
urlConnection.setRequestProperty("Content-Length", String.valueOf(bytes.length));
urlConnection.getOutputStream().write(bytes);
urlConnection.getOutputStream().close();
}
urlConnection.connect();
int responseCode = urlConnection.getResponseCode();
urlConnection.disconnect();
return responseCode;
} catch (Exception e) {
throw new BuildException(e);
}
}
public void setUri(String uri) {
this.uri = uri;
}
public void setMethod(String method) {
this.method = method;
}
public void setBody(String body) {
this.body = body;
}
public void setUsername(String username) {
this.username = username;
}
public void setPassword(String password) {
this.password = password;
}
}
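
A usage sketch (editor's addition, not part of the commit) driving the task and its condition subclass from plain Java rather than from the build file; it assumes an initialized Ant Project and the cluster1 http port used in integration-tests.xml.

import org.apache.tools.ant.Project;
import org.elasticsearch.ant.HttpCondition;
import org.elasticsearch.ant.HttpTask;

public class HttpTaskExample {
    public static void main(String[] args) {
        Project project = new Project();
        project.init();
        // equivalent of <xhttp uri="..." method="PUT" username="..." password="..."/>
        HttpTask put = new HttpTask();
        put.setProject(project); // a Task needs a Project so it can log
        put.setUri("http://127.0.0.1:9700/index1");
        put.setMethod("PUT");
        put.setUsername("test_admin");
        put.setPassword("changeme");
        put.execute(); // logs "response code=200" on success
        // the condition variant returns a boolean instead of only logging
        HttpCondition healthy = new HttpCondition();
        healthy.setProject(project);
        healthy.setUri("http://127.0.0.1:9700/_cluster/health/index1?wait_for_status=yellow");
        healthy.setUsername("test_admin");
        healthy.setPassword("changeme");
        healthy.setExpectedResponseCode(200); // the default, shown for clarity
        System.out.println("yellow=" + healthy.eval());
    }
}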


@@ -0,0 +1,44 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.shield;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;
import java.io.IOException;
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
public class RestIT extends TribeRestTestCase {
private static final String USER = "test_admin";
private static final String PASS = "changeme";
public RestIT(@Name("yaml") RestTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
return ESRestTestCase.createParameters(0, 1);
}
@Override
protected Settings restClientSettings() {
String token = basicAuthHeaderValue(USER, new SecuredString(PASS.toCharArray()));
return Settings.builder()
.put(Headers.PREFIX + ".Authorization", token)
.build();
}
}
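
For reference, a sketch (editor's addition, not part of the commit) of the Authorization header this builds, assuming basicAuthHeaderValue performs standard HTTP Basic encoding; only JDK classes are used.

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BasicAuthSketch {
    public static void main(String[] args) {
        String token = "Basic " + Base64.getEncoder()
                .encodeToString("test_admin:changeme".getBytes(StandardCharsets.UTF_8));
        System.out.println(token); // prints: Basic dGVzdF9hZG1pbjpjaGFuZ2VtZQ==
    }
}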


@@ -0,0 +1,373 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.shield;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.annotations.TestGroup;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.LuceneTestCase.SuppressFsync;
import org.apache.lucene.util.TimeUnits;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.node.Node;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.RestTestExecutionContext;
import org.elasticsearch.test.rest.client.RestException;
import org.elasticsearch.test.rest.parser.RestTestParseException;
import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
import org.elasticsearch.test.rest.section.DoSection;
import org.elasticsearch.test.rest.section.ExecutableSection;
import org.elasticsearch.test.rest.section.RestTestSuite;
import org.elasticsearch.test.rest.section.SkipSection;
import org.elasticsearch.test.rest.section.TestSection;
import org.elasticsearch.test.rest.spec.RestApi;
import org.elasticsearch.test.rest.spec.RestSpec;
import org.elasticsearch.test.rest.support.FileUtils;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.net.*;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Forked from RestTestCase with the changes required to run REST tests via a tribe node
*
* Reasons for forking:
* 1) Always communicate via the tribe node from the tests. The original class in core connects to any endpoint it can see via the nodes info api, which would mean the nodes belonging to the other clusters would also be used as entry points. That should not happen for the tribe tests.
* 2) The original class in core executes delete calls after each test, but the tribe node can't handle master level write operations. Those api calls hang for 1m and then just fail.
* 3) The indices in cluster1 and cluster2 are created from the ant integ file, and the original class in core would simply remove them in between tests.
* 4) Extends ESTestCase instead of ESIntegTestCase, so it doesn't set up a test cluster and just connects to the single endpoint defined in tests.rest.cluster.
*/
@ESRestTestCase.Rest
@SuppressFsync // we aren't trying to test this here, and it can make the test slow
@SuppressCodecs("*") // requires custom completion postings format
@ClusterScope(randomDynamicTemplates = false)
@TimeoutSuite(millis = 40 * TimeUnits.MINUTE) // timeout the suite after 40min and fail the test.
public abstract class TribeRestTestCase extends ESTestCase {
/**
* Property that controls whether the REST tests are run (default) or not
*/
public static final String TESTS_REST = "tests.rest";
public static final String TESTS_REST_CLUSTER = "tests.rest.cluster";
/**
* Annotation for REST tests
*/
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@TestGroup(enabled = true, sysProperty = ESRestTestCase.TESTS_REST)
public @interface Rest {
}
/**
* Property that controls which REST tests get run. Supports a comma separated list of tests
* or directories that contain tests e.g. -Dtests.rest.suite=index,get,create/10_with_id
*/
public static final String REST_TESTS_SUITE = "tests.rest.suite";
/**
* Property that blacklists some of the REST tests, based on a comma separated list of globs
* e.g. -Dtests.rest.blacklist=get/10_basic/*
*/
public static final String REST_TESTS_BLACKLIST = "tests.rest.blacklist";
/**
* Property that controls whether spec validation is enabled (default true).
*/
public static final String REST_TESTS_VALIDATE_SPEC = "tests.rest.validate_spec";
/**
* Property that controls where the REST spec files are loaded from
*/
public static final String REST_TESTS_SPEC = "tests.rest.spec";
public static final String REST_LOAD_PACKAGED_TESTS = "tests.rest.load_packaged";
private static final String DEFAULT_TESTS_PATH = "/rest-api-spec/test";
private static final String DEFAULT_SPEC_PATH = "/rest-api-spec/api";
private static final String PATHS_SEPARATOR = ",";
private final PathMatcher[] blacklistPathMatchers;
private static RestTestExecutionContext restTestExecutionContext;
private final RestTestCandidate testCandidate;
public TribeRestTestCase(RestTestCandidate testCandidate) {
this.testCandidate = testCandidate;
String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null);
if (blacklist != null) {
blacklistPathMatchers = new PathMatcher[blacklist.length];
int i = 0;
for (String glob : blacklist) {
blacklistPathMatchers[i++] = PathUtils.getDefaultFileSystem().getPathMatcher("glob:" + glob);
}
} else {
blacklistPathMatchers = new PathMatcher[0];
}
}
@Override
protected void afterIfFailed(List<Throwable> errors) {
logger.info("Stash dump on failure [{}]", XContentHelper.toString(restTestExecutionContext.stash()));
super.afterIfFailed(errors);
}
public static Iterable<Object[]> createParameters(int id, int count) throws IOException, RestTestParseException {
TestGroup testGroup = Rest.class.getAnnotation(TestGroup.class);
String sysProperty = TestGroup.Utilities.getSysProperty(Rest.class);
boolean enabled;
try {
enabled = RandomizedTest.systemPropertyAsBoolean(sysProperty, testGroup.enabled());
} catch (IllegalArgumentException e) {
// if the system property is malformed, disable the group
enabled = false;
}
if (!enabled) {
return new ArrayList<>();
}
//parse tests only if rest test group is enabled, otherwise rest tests might not even be available on file system
List<RestTestCandidate> restTestCandidates = collectTestCandidates(id, count);
List<Object[]> objects = new ArrayList<>();
for (RestTestCandidate restTestCandidate : restTestCandidates) {
objects.add(new Object[]{restTestCandidate});
}
return objects;
}
private static List<RestTestCandidate> collectTestCandidates(int id, int count) throws RestTestParseException, IOException {
List<RestTestCandidate> testCandidates = new ArrayList<>();
FileSystem fileSystem = getFileSystem();
// don't make a try-with, getFileSystem returns null
// ... and you can't close() the default filesystem
try {
String[] paths = resolvePathsProperty(REST_TESTS_SUITE, DEFAULT_TESTS_PATH);
Map<String, Set<Path>> yamlSuites = FileUtils.findYamlSuites(fileSystem, DEFAULT_TESTS_PATH, paths);
RestTestSuiteParser restTestSuiteParser = new RestTestSuiteParser();
//yaml suites are grouped by directory (effectively by api)
for (String api : yamlSuites.keySet()) {
List<Path> yamlFiles = new ArrayList<>(yamlSuites.get(api));
for (Path yamlFile : yamlFiles) {
String key = api + yamlFile.getFileName().toString();
if (mustExecute(key, id, count)) {
RestTestSuite restTestSuite = restTestSuiteParser.parse(api, yamlFile);
for (TestSection testSection : restTestSuite.getTestSections()) {
testCandidates.add(new RestTestCandidate(restTestSuite, testSection));
}
}
}
}
} finally {
IOUtils.close(fileSystem);
}
//sort the candidates so they will always be in the same order before being shuffled, for repeatability
Collections.sort(testCandidates, new Comparator<RestTestCandidate>() {
@Override
public int compare(RestTestCandidate o1, RestTestCandidate o2) {
return o1.getTestPath().compareTo(o2.getTestPath());
}
});
return testCandidates;
}
private static boolean mustExecute(String test, int id, int count) {
int hash = (int) (Math.abs((long)test.hashCode()) % count);
return hash == id;
}
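/*
 * Worked example (editor's sketch, hypothetical suite names): with count = 2
 * parallel JVMs, Math.abs((long) key.hashCode()) % 2 maps every suite to
 * exactly one id, so each suite runs on exactly one JVM. In this module
 * RestIT calls createParameters(0, 1), i.e. a single JVM runs all suites.
 */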
private static String[] resolvePathsProperty(String propertyName, String defaultValue) {
String property = System.getProperty(propertyName);
if (!Strings.hasLength(property)) {
return defaultValue == null ? null : new String[]{defaultValue};
} else {
return property.split(PATHS_SEPARATOR);
}
}
/**
* Returns a new FileSystem to read REST resources, or null if they
* are available from classpath.
*/
@SuppressForbidden(reason = "proper use of URL, hack around a JDK bug")
static FileSystem getFileSystem() throws IOException {
// REST suite handling is currently complicated, with lots of filtering and so on
// For now, to work embedded in a jar, return a ZipFileSystem over the jar contents.
URL codeLocation = FileUtils.class.getProtectionDomain().getCodeSource().getLocation();
boolean loadPackaged = RandomizedTest.systemPropertyAsBoolean(REST_LOAD_PACKAGED_TESTS, true);
if (codeLocation.getFile().endsWith(".jar") && loadPackaged) {
try {
// hack around a bug in the zipfilesystem implementation before java 9,
// its checkWritable was incorrect and it won't work without write permissions.
// if we add the permission, it will open jars r/w, which is too scary! so copy to a safe r-w location.
Path tmp = Files.createTempFile(null, ".jar");
try (InputStream in = codeLocation.openStream()) {
Files.copy(in, tmp, StandardCopyOption.REPLACE_EXISTING);
}
return FileSystems.newFileSystem(new URI("jar:" + tmp.toUri()), Collections.<String,Object>emptyMap());
} catch (URISyntaxException e) {
throw new IOException("couldn't open zipfilesystem: ", e);
}
} else {
return null;
}
}
@BeforeClass
public static void initExecutionContext() throws IOException, RestException {
String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH);
RestSpec restSpec = null;
FileSystem fileSystem = getFileSystem();
// don't make a try-with, getFileSystem returns null
// ... and you can't close() the default filesystem
try {
restSpec = RestSpec.parseFrom(fileSystem, DEFAULT_SPEC_PATH, specPaths);
} finally {
IOUtils.close(fileSystem);
}
validateSpec(restSpec);
restTestExecutionContext = new RestTestExecutionContext(restSpec);
}
private static void validateSpec(RestSpec restSpec) {
boolean validateSpec = RandomizedTest.systemPropertyAsBoolean(REST_TESTS_VALIDATE_SPEC, true);
if (validateSpec) {
StringBuilder errorMessage = new StringBuilder();
for (RestApi restApi : restSpec.getApis()) {
if (restApi.getMethods().contains("GET") && restApi.isBodySupported()) {
if (!restApi.getMethods().contains("POST")) {
errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support POST");
}
}
}
if (errorMessage.length() > 0) {
throw new IllegalArgumentException(errorMessage.toString());
}
}
}
@AfterClass
public static void close() {
if (restTestExecutionContext != null) {
restTestExecutionContext.close();
restTestExecutionContext = null;
}
}
/**
* Used to obtain settings for the REST client that is used to send REST requests.
*/
protected Settings restClientSettings() {
return Settings.EMPTY;
}
protected InetSocketAddress[] httpAddresses() {
String clusterAddresses = System.getProperty(TESTS_REST_CLUSTER);
String[] stringAddresses = clusterAddresses.split(",");
InetSocketAddress[] transportAddresses = new InetSocketAddress[stringAddresses.length];
int i = 0;
for (String stringAddress : stringAddresses) {
String[] split = stringAddress.split(":");
if (split.length < 2) {
throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid");
}
try {
transportAddresses[i++] = new InetSocketAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1]));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
} catch (UnknownHostException e) {
throw new IllegalArgumentException("unknown host [" + split[0] + "]", e);
}
}
return transportAddresses;
}
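/*
 * Editor's sketch (hypothetical value): httpAddresses() reads tests.rest.cluster
 * as a comma separated host:port list, e.g. -Dtests.rest.cluster=127.0.0.1:9700,127.0.0.1:9900
 * would yield two InetSocketAddress entries. The parent pom above wires the
 * property to 127.0.0.1:${integ.http.port}, so the tests only talk to the tribe node.
 */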
@Before
public void reset() throws IOException, RestException {
//skip test if it matches one of the blacklist globs
for (PathMatcher blacklistedPathMatcher : blacklistPathMatchers) {
//we need to replace a few characters otherwise the test section name can't be parsed as a path on windows
String testSection = testCandidate.getTestSection().getName().replace("*", "").replace("\\", "/").replaceAll("\\s+/", "/").replace(":", "").trim();
String testPath = testCandidate.getSuitePath() + "/" + testSection;
assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher.matches(PathUtils.get(testPath)));
}
//The client needs non-static info to get initialized, therefore it can't be initialized in @BeforeClass
restTestExecutionContext.initClient(httpAddresses(), restClientSettings());
restTestExecutionContext.clear();
//skip test if the whole suite (yaml file) is disabled
assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getSetupSection().getSkipSection()),
testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion()));
//skip test if test section is disabled
assumeFalse(buildSkipMessage(testCandidate.getTestPath(), testCandidate.getTestSection().getSkipSection()),
testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion()));
}
private static String buildSkipMessage(String description, SkipSection skipSection) {
StringBuilder messageBuilder = new StringBuilder();
if (skipSection.isVersionCheck()) {
messageBuilder.append("[").append(description).append("] skipped, reason: [").append(skipSection.getReason()).append("] ");
} else {
messageBuilder.append("[").append(description).append("] skipped, reason: features ").append(skipSection.getFeatures()).append(" not supported");
}
return messageBuilder.toString();
}
@Test
public void test() throws IOException {
//let's check that there is something to run, otherwise there might be a problem with the test section
if (testCandidate.getTestSection().getExecutableSections().size() == 0) {
throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]");
}
if (!testCandidate.getSetupSection().isEmpty()) {
logger.info("start setup test [{}]", testCandidate.getTestPath());
for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) {
doSection.execute(restTestExecutionContext);
}
logger.info("end setup test [{}]", testCandidate.getTestPath());
}
restTestExecutionContext.clear();
for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) {
executableSection.execute(restTestExecutionContext);
}
}
}


@@ -1,230 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.shield.tribe;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.shield.authc.support.UsernamePasswordToken;
import org.elasticsearch.shield.crypto.InternalCryptoService;
import org.elasticsearch.shield.transport.netty.ShieldNettyHttpServerTransport;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.ShieldIntegTestCase;
import org.elasticsearch.test.ShieldSettingsSource;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.tribe.TribeService;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.test.InternalTestCluster.clusterName;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.*;
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/x-plugins/issues/551")
public class TribeTests extends ShieldIntegTestCase {
//use known suite prefix since their threads are already ignored via ElasticsearchThreadFilter
public static final String SECOND_CLUSTER_NODE_PREFIX = SUITE_CLUSTER_NODE_PREFIX;
public static final String TRIBE_CLUSTER_NODE_PREFIX = "tribe_cluster_node_";
private static InternalTestCluster cluster2;
private static ShieldSettingsSource tribeSettingsSource;
private InternalTestCluster tribeNodeCluster;
@Before
public void setupSecondClusterAndTribeNode() throws Exception {
final Settings globalClusterSettings = internalCluster().getInstance(Settings.class);
//TODO tribe nodes and all of the tribes need to have either ssl disabled or enabled as a whole
//we read the randomized setting from the global cluster and apply it to the other cluster that we are going to start
//for simplicity the same certificates are used on all clusters
final boolean sslTransportEnabled = globalClusterSettings.getAsBoolean("shield.transport.ssl", null);
//we run this part in @Before instead of beforeClass because we need to have the current cluster already assigned to global
//so that we can retrieve its settings and apply some of them to the second cluster (and tribe node too)
if (cluster2 == null) {
// create another cluster
String cluster2Name = clusterName(Scope.SUITE.name(), randomLong());
//no port conflicts as this test uses the global cluster and a suite cluster that gets manually created
ShieldSettingsSource cluster2SettingsSource = new ShieldSettingsSource(2, sslTransportEnabled, systemKey(), createTempDir(), Scope.SUITE);
cluster2 = new InternalTestCluster("network", randomLong(), createTempDir(), 2, 2, cluster2Name, cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, true);
assert tribeSettingsSource == null;
//given the low (2 and 1) number of nodes that the 2 SUITE clusters will have, we are not going to have port conflicts
tribeSettingsSource = new ShieldSettingsSource(1, sslTransportEnabled, systemKey(), createTempDir(), Scope.SUITE) {
@Override
public Settings nodeSettings(int nodeOrdinal) {
Settings shieldSettings = super.nodeSettings(nodeOrdinal);
//all the settings are needed for the tribe node, some of them will also need to be copied to the tribe clients configuration
Settings.Builder builder = Settings.builder().put(shieldSettings);
//the tribe node itself won't join any cluster, no need for unicast discovery configuration
builder.remove("discovery.type");
builder.remove("discovery.zen.ping.multicast.enabled");
//remove doesn't remove all the elements of an array, but we know it has only one element
builder.remove("discovery.zen.ping.unicast.hosts.0");
//copy the needed settings to the tribe clients configuration
ImmutableMap<String, String> shieldSettingsAsMap = shieldSettings.getAsMap();
for (Map.Entry<String, String> entry : shieldSettingsAsMap.entrySet()) {
if (isSettingNeededForTribeClient(entry.getKey())) {
builder.put("tribe.t1." + entry.getKey(), entry.getValue());
builder.put("tribe.t2." + entry.getKey(), entry.getValue());
}
}
return builder.put("tribe.t1.cluster.name", internalCluster().getClusterName())
.putArray("tribe.t1.discovery.zen.ping.unicast.hosts", unicastHosts(internalCluster()))
.put("tribe.t1.shield.transport.ssl", sslTransportEnabled)
.put("tribe.t2.cluster.name", cluster2.getClusterName())
.putArray("tribe.t2.discovery.zen.ping.unicast.hosts", unicastHosts(cluster2))
.put("tribe.t2.shield.transport.ssl", sslTransportEnabled).build();
}
/**
* Returns true if the setting is needed to setup a tribe client and needs to get forwarded to it, false otherwise.
* Only some of the settings need to be forwarded e.g. realm configuration gets filtered out
*/
private boolean isSettingNeededForTribeClient(String settingKey) {
if (settingKey.equals("transport.host")) {
return true;
}
//discovery settings get forwarded to tribe clients to disable multicast discovery
if (settingKey.equals("discovery.type") || settingKey.equals("discovery.zen.ping.multicast.enabled")) {
return true;
}
//plugins need to be properly loaded on the tribe clients too
if (settingKey.startsWith("plugin")) {
return true;
}
//make sure node.mode is network on the tribe clients too
if (settingKey.equals("node.mode")) {
return true;
}
//forward the shield audit enabled to the tribe clients
if (settingKey.equals("shield.audit.enabled")) {
return true;
}
//forward the system key to the tribe clients, same file will be used
if (settingKey.equals(InternalCryptoService.FILE_SETTING)) {
return true;
}
//forward ssl settings to the tribe clients, same certificates will be used
if (settingKey.startsWith("shield.ssl") || settingKey.equals("shield.transport.ssl") || settingKey.equals(ShieldNettyHttpServerTransport.HTTP_SSL_SETTING)) {
return true;
}
//forward the credentials to the tribe clients
if (settingKey.equals("shield.user") || settingKey.equals(Headers.PREFIX + "." + UsernamePasswordToken.BASIC_AUTH_HEADER)) {
return true;
}
return false;
}
};
}
cluster2.beforeTest(getRandom(), 0.5);
//we need to recreate the tribe node after each test otherwise ensureClusterSizeConsistency barfs
String tribeClusterName = clusterName(Scope.SUITE.name(), randomLong());
tribeNodeCluster = new InternalTestCluster("network", randomLong(), createTempDir(), 1, 1, tribeClusterName, tribeSettingsSource, 0, false, TRIBE_CLUSTER_NODE_PREFIX, true);
tribeNodeCluster.beforeTest(getRandom(), 0.5);
awaitSameNodeCounts();
}
private static String[] unicastHosts(InternalTestCluster testCluster) {
Iterable<Transport> transports = testCluster.getInstances(Transport.class);
List<String> unicastHosts = new ArrayList<>();
for (Transport transport : transports) {
TransportAddress transportAddress = transport.boundAddress().boundAddress();
assertThat(transportAddress, is(instanceOf(InetSocketTransportAddress.class)));
InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) transportAddress;
unicastHosts.add("localhost:" + inetSocketTransportAddress.address().getPort());
}
return unicastHosts.toArray(new String[unicastHosts.size()]);
}
@After
public void afterTest() throws IOException {
//we need to close the tribe node after each test otherwise ensureClusterSizeConsistency barfs
if (tribeNodeCluster != null) {
try {
tribeNodeCluster.close();
} finally {
tribeNodeCluster = null;
}
}
//and clean up the second cluster that we manually started
if (cluster2 != null) {
try {
cluster2.wipe();
} finally {
cluster2.afterTest();
}
}
}
@AfterClass
public static void tearDownSecondCluster() {
if (cluster2 != null) {
try {
cluster2.close();
} finally {
cluster2 = null;
tribeSettingsSource = null;
}
}
}
@Test
public void testIndexRefreshAndSearch() throws Exception {
internalCluster().client().admin().indices().prepareCreate("test1").get();
cluster2.client().admin().indices().prepareCreate("test2").get();
assertThat(tribeNodeCluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get().getStatus(), equalTo(ClusterHealthStatus.GREEN));
tribeNodeCluster.client().prepareIndex("test1", "type1", "1").setSource("field1", "value1").get();
tribeNodeCluster.client().prepareIndex("test2", "type1", "1").setSource("field1", "value1").get();
assertNoFailures(tribeNodeCluster.client().admin().indices().prepareRefresh().get());
assertHitCount(tribeNodeCluster.client().prepareSearch().get(), 2l);
}
private void awaitSameNodeCounts() throws Exception {
assertBusy(new Runnable() {
@Override
public void run() {
DiscoveryNodes tribeNodes = tribeNodeCluster.client().admin().cluster().prepareState().get().getState().getNodes();
assertThat(countDataNodesForTribe("t1", tribeNodes), equalTo(internalCluster().client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()));
assertThat(countDataNodesForTribe("t2", tribeNodes), equalTo(cluster2.client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()));
}
});
}
private int countDataNodesForTribe(String tribeName, DiscoveryNodes nodes) {
int count = 0;
for (DiscoveryNode node : nodes) {
if (!node.dataNode()) {
continue;
}
if (tribeName.equals(node.getAttributes().get(TribeService.TRIBE_NAME))) {
count++;
}
}
return count;
}
}