From ec4de10ee2b0fb2a8be97e03204a84e175b28872 Mon Sep 17 00:00:00 2001
From: Areek Zillur
Date: Tue, 28 Feb 2017 18:36:53 -0500
Subject: [PATCH] Tribe node security tests with external clusters (elastic/x-pack-elasticsearch#606)

* Tribe node security tests with external clusters

This PR adds a qa module for security tests with tribe node using
external clusters. Existing SecurityTribeIT tests have been ported to
use external clusters with tribe setup as a first step.
Currently the ports to the external clusters are passed to the
integration tests through system properties and external clusters are
built on test setup (the code for building external clusters is copied
from ESIntegTestCase).
This is a WIP as we need a more generic way to facilitate testing tribe
setup with external clusters. thoughts welcome.

* incorporate feedback

* update to master

Original commit: elastic/x-pack-elasticsearch@686887ca91497e2fd791e614543e4b6a74663d9a
---
 qa/tribe-tests-with-security/build.gradle |  83 +++++++
 .../test/TribeWithSecurityIT.java         | 210 ++++++++++++++++++
 2 files changed, 293 insertions(+)
 create mode 100644 qa/tribe-tests-with-security/build.gradle
 create mode 100644 qa/tribe-tests-with-security/src/test/java/org/elasticsearch/test/TribeWithSecurityIT.java

diff --git a/qa/tribe-tests-with-security/build.gradle b/qa/tribe-tests-with-security/build.gradle
new file mode 100644
index 00000000000..e4dc9f6b7c9
--- /dev/null
+++ b/qa/tribe-tests-with-security/build.gradle
@@ -0,0 +1,83 @@
+import org.elasticsearch.gradle.test.ClusterConfiguration
+import org.elasticsearch.gradle.test.ClusterFormationTasks
+import org.elasticsearch.gradle.test.NodeInfo
+
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+
+dependencies {
+  testCompile project(path: ':x-pack-elasticsearch:plugin', configuration: 'runtime')
+  testCompile project(path: ':x-pack-elasticsearch:plugin', configuration: 'testArtifacts')
+}
+
+ClusterConfiguration configOne = new ClusterConfiguration(project)
+configOne.clusterName = 'cluster1'
+configOne.setting('node.name', 'cluster1-node1')
+configOne.setting('xpack.monitoring.enabled', false)
+configOne.setting('xpack.ml.enabled', false)
+configOne.plugin(':x-pack-elasticsearch:plugin')
+configOne.setupCommand('setupDummyUser',
+        'bin/x-pack/users', 'useradd', 'test_user', '-p', 'changeme', '-r', 'superuser')
+configOne.waitCondition = { node, ant ->
+  File tmpFile = new File(node.cwd, 'wait.success')
+  ant.get(src: "http://${node.httpUri()}",
+          dest: tmpFile.toString(),
+          username: 'test_user',
+          password: 'changeme',
+          ignoreerrors: true,
+          retries: 10)
+  return tmpFile.exists()
+}
+List<NodeInfo> cluster1Nodes = ClusterFormationTasks.setup(project, 'clusterOne', integTestRunner, configOne)
+
+ClusterConfiguration configTwo = new ClusterConfiguration(project)
+configTwo.clusterName = 'cluster2'
+configTwo.setting('node.name', 'cluster2-node1')
+configTwo.setting('xpack.monitoring.enabled', false)
+configTwo.setting('xpack.ml.enabled', false)
+configTwo.plugin(':x-pack-elasticsearch:plugin')
+configTwo.setupCommand('setupDummyUser',
+        'bin/x-pack/users', 'useradd', 'test_user', '-p', 'changeme', '-r', 'superuser')
+configTwo.waitCondition = { node, ant ->
+  File tmpFile = new File(node.cwd, 'wait.success')
+  ant.get(src: "http://${node.httpUri()}",
+          dest: tmpFile.toString(),
+          username: 'test_user',
+          password: 'changeme',
+          ignoreerrors: true,
+          retries: 10)
+  return tmpFile.exists()
+}
+List<NodeInfo> cluster2Nodes = ClusterFormationTasks.setup(project, 'clusterTwo', integTestRunner, configTwo)
+
+integTestCluster {
+  plugin ':x-pack-elasticsearch:plugin'
+  setupCommand 'setupDummyUser',
+          'bin/x-pack/users', 'useradd', 'test_user', '-p', 'changeme', '-r', 'superuser'
+  setting 'xpack.monitoring.enabled', false
+  setting 'xpack.ml.enabled', false
+  setting 'node.name', 'tribe-node'
+  setting 'tribe.on_conflict', 'prefer_cluster1'
+  setting 'tribe.cluster1.cluster.name', 'cluster1'
+  setting 'tribe.cluster1.discovery.zen.ping.unicast.hosts', "'${-> cluster1Nodes.get(0).transportUri()}'"
+  setting 'tribe.cluster1.http.enabled', 'true'
+  setting 'tribe.cluster2.cluster.name', 'cluster2'
+  setting 'tribe.cluster2.discovery.zen.ping.unicast.hosts', "'${-> cluster2Nodes.get(0).transportUri()}'"
+  setting 'tribe.cluster2.http.enabled', 'true'
+  waitCondition = { node, ant ->
+    File tmpFile = new File(node.cwd, 'wait.success')
+    ant.get(src: "http://${node.httpUri()}",
+            dest: tmpFile.toString(),
+            username: 'test_user',
+            password: 'changeme',
+            ignoreerrors: true,
+            retries: 10)
+    return tmpFile.exists()
+  }
+}
+
+integTestRunner {
+  systemProperty 'tests.cluster', "${-> cluster1Nodes.get(0).transportUri()}"
+  systemProperty 'tests.cluster2', "${-> cluster2Nodes.get(0).transportUri()}"
+  systemProperty 'tests.tribe', "${-> integTest.nodes.get(0).transportUri()}"
+}
diff --git a/qa/tribe-tests-with-security/src/test/java/org/elasticsearch/test/TribeWithSecurityIT.java b/qa/tribe-tests-with-security/src/test/java/org/elasticsearch/test/TribeWithSecurityIT.java
new file mode 100644
index 00000000000..a685e7f74ea
--- /dev/null
+++ b/qa/tribe-tests-with-security/src/test/java/org/elasticsearch/test/TribeWithSecurityIT.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.test;
+
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.xpack.security.Security;
+import org.elasticsearch.xpack.security.action.role.GetRolesResponse;
+import org.elasticsearch.xpack.security.action.role.PutRoleResponse;
+import org.elasticsearch.xpack.security.authc.support.SecuredString;
+import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
+import org.elasticsearch.xpack.security.client.SecurityClient;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.arrayContaining;
+
+public class TribeWithSecurityIT extends SecurityIntegTestCase {
+
+    private static TestCluster cluster2;
+    private static TestCluster tribeNode;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        if (cluster2 == null) {
+            cluster2 = buildExternalCluster(System.getProperty("tests.cluster2"));
+        }
+        if (tribeNode == null) {
+            tribeNode = buildExternalCluster(System.getProperty("tests.tribe"));
+        }
+    }
+
+
+    @AfterClass
+    public static void tearDownExternalClusters() throws IOException {
+        if (cluster2 != null) {
+            try {
+                cluster2.close();
+            } finally {
+                cluster2 = null;
+            }
+        }
+        if (tribeNode != null) {
+            try {
+                tribeNode.close();
+            } finally {
+                tribeNode = null;
+            }
+        }
+    }
+
+    @After
+    public void removeSecurityIndex() {
+        client().admin().indices().prepareDelete(SECURITY_INDEX_NAME).get();
+        cluster2.client().admin().indices().prepareDelete(SECURITY_INDEX_NAME).get();
+        securityClient(client()).prepareClearRealmCache().get();
+        securityClient(cluster2.client()).prepareClearRealmCache().get();
+    }
+
+    @Before
+    public void addSecurityIndex() {
+        client().admin().indices().prepareCreate(SECURITY_INDEX_NAME).get();
+        cluster2.client().admin().indices().prepareCreate(SECURITY_INDEX_NAME).get();
+    }
+
+    @Override
+    protected Settings externalClusterClientSettings() {
+        Settings.Builder builder = Settings.builder().put(super.externalClusterClientSettings());
+        builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Security.NAME4);
+        return builder.build();
+    }
+
+    private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException {
+        String[] stringAddresses = clusterAddresses.split(",");
+        TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
+        int i = 0;
+        for (String stringAddress : stringAddresses) {
+            URL url = new URL("http://" + stringAddress);
+            InetAddress inetAddress = InetAddress.getByName(url.getHost());
+            transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
+        }
+        return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses);
+    }
+
+    public void testThatTribeCanAuthenticateElasticUser() throws Exception {
+        ClusterHealthResponse response = tribeNode.client().filterWithHeader(Collections.singletonMap("Authorization",
+                UsernamePasswordToken.basicAuthHeaderValue("elastic", new SecuredString("changeme".toCharArray()))))
+                .admin().cluster().prepareHealth().get();
+        assertNoTimeout(response);
+    }
+
+    public void testThatTribeCanAuthenticateElasticUserWithChangedPassword() throws Exception {
+        securityClient(client()).prepareChangePassword("elastic", "password".toCharArray()).get();
+
+        assertTribeNodeHasAllIndices();
+        ClusterHealthResponse response = tribeNode.client().filterWithHeader(Collections.singletonMap("Authorization",
+                UsernamePasswordToken.basicAuthHeaderValue("elastic", new SecuredString("password".toCharArray()))))
+                .admin().cluster().prepareHealth().get();
+        assertNoTimeout(response);
+    }
+
+    public void testThatTribeClustersHaveDifferentPasswords() throws Exception {
+        securityClient().prepareChangePassword("elastic", "password".toCharArray()).get();
+        securityClient(cluster2.client()).prepareChangePassword("elastic", "password2".toCharArray()).get();
+
+        assertTribeNodeHasAllIndices();
+        ClusterHealthResponse response = tribeNode.client().filterWithHeader(Collections.singletonMap("Authorization",
+                UsernamePasswordToken.basicAuthHeaderValue("elastic", new SecuredString("password".toCharArray()))))
+                .admin().cluster().prepareHealth().get();
+        assertNoTimeout(response);
+    }
+
+    public void testUserModificationUsingTribeNodeAreDisabled() throws Exception {
+        SecurityClient securityClient = securityClient(tribeNode.client());
+        NotSerializableExceptionWrapper e = expectThrows(NotSerializableExceptionWrapper.class,
+                () -> securityClient.preparePutUser("joe", "password".toCharArray()).get());
+        assertThat(e.getMessage(), containsString("users may not be created or modified using a tribe node"));
+        e = expectThrows(NotSerializableExceptionWrapper.class, () -> securityClient.prepareSetEnabled("elastic", randomBoolean()).get());
+        assertThat(e.getMessage(), containsString("users may not be created or modified using a tribe node"));
+        e = expectThrows(NotSerializableExceptionWrapper.class,
+                () -> securityClient.prepareChangePassword("elastic", "password".toCharArray()).get());
+        assertThat(e.getMessage(), containsString("users may not be created or modified using a tribe node"));
+        e = expectThrows(NotSerializableExceptionWrapper.class, () -> securityClient.prepareDeleteUser("joe").get());
+        assertThat(e.getMessage(), containsString("users may not be deleted using a tribe node"));
+    }
+
+    // note tribe node has tribe.on_conflict set to prefer cluster_1
+    public void testRetrieveRolesOnPreferredClusterOnly() throws Exception {
+        final int randomRoles = scaledRandomIntBetween(3, 8);
+        List<String> shouldBeSuccessfulRoles = new ArrayList<>();
+
+        for (int i = 0; i < randomRoles; i++) {
+            final String rolename = "preferredClusterRole" + i;
+            PutRoleResponse response = securityClient(client()).preparePutRole(rolename).cluster("monitor").get();
+            assertTrue(response.isCreated());
+            shouldBeSuccessfulRoles.add(rolename);
+        }
+
+        assertTribeNodeHasAllIndices();
+        SecurityClient securityClient = securityClient(tribeNode.client());
+        for (String rolename : shouldBeSuccessfulRoles) {
+            GetRolesResponse response = securityClient.prepareGetRoles(rolename).get();
+            assertTrue(response.hasRoles());
+            assertEquals(1, response.roles().length);
+            assertThat(response.roles()[0].getClusterPrivileges(), arrayContaining("monitor"));
+        }
+    }
+
+    private void assertTribeNodeHasAllIndices() throws Exception {
+        assertBusy(() -> {
+            Set<String> indices = new HashSet<>();
+            client().admin().cluster().prepareState().setMetaData(true).get()
+                    .getState().getMetaData().getIndices().keysIt().forEachRemaining(indices::add);
+            cluster2.client().admin().cluster().prepareState().setMetaData(true).get()
+                    .getState().getMetaData().getIndices().keysIt().forEachRemaining(indices::add);
+
+            ClusterState state = tribeNode.client().admin().cluster().prepareState().setRoutingTable(true)
+                    .setMetaData(true).get().getState();
+            StringBuilder sb = new StringBuilder();
+            for (String index : indices) {
+                if (sb.length() == 0) {
+                    sb.append("[");
+                    sb.append(index);
+                } else {
+                    sb.append(",");
+                    sb.append(index);
+                }
+            }
+            sb.append("]");
+            Set<String> tribeIndices = new HashSet<>();
+            for (ObjectCursor<IndexMetaData> cursor : state.getMetaData().getIndices().values()) {
+                tribeIndices.add(cursor.value.getIndex().getName());
+            }
+
+            assertThat("cluster indices [" + indices + "] tribe indices [" + tribeIndices + "]",
+                    state.getMetaData().getIndices().size(), equalTo(indices.size()));
+            for (String index : indices) {
+                assertTrue(state.getMetaData().hasIndex(index));
+                assertTrue(state.getRoutingTable().hasIndex(index));
+                assertTrue(state.getRoutingTable().index(index).allPrimaryShardsActive());
+            }
+        });
+    }
+}
\ No newline at end of file