security: native users and roles can be used on tribe nodes

This change allows native users and roles to be used on tribe nodes. The tribe node uses the
security index of one of its tribes, and that tribe must be selected with the `tribe.on_conflict`
setting (a `prefer_<tribe_name>` value). User and role modifications are not permitted when running
on a tribe node.
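
For reference, a minimal sketch of the settings such a tribe node needs, mirroring the new
SecurityTribeIT test below; the class name, tribe names, and cluster names here are illustrative
placeholders, not part of this change:

    import org.elasticsearch.common.settings.Settings;

    // hypothetical example of a tribe node configuration that can use native users and roles
    public class TribeNodeSecurityExample {
        public static Settings tribeNodeSettings() {
            return Settings.builder()
                    .put("tribe.t1.cluster.name", "cluster_one")
                    .put("tribe.t2.cluster.name", "cluster_two")
                    // required whenever the native or reserved realm is usable: the preferred
                    // tribe's security index is the one the tribe node reads users and roles from
                    .put("tribe.on_conflict", "prefer_t1")
                    .build();
        }
    }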

Closes elastic/elasticsearch#3451

Original commit: elastic/x-pack-elasticsearch@2b762ca648
Jay Modi 2016-10-03 10:12:30 -04:00 committed by GitHub
parent c7b7a9f201
commit 52b7170121
5 changed files with 435 additions and 62 deletions

File: Security.java

@@ -41,6 +41,7 @@ import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportInterceptor;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.tribe.TribeService;
 import org.elasticsearch.watcher.ResourceWatcherService;
 import org.elasticsearch.xpack.XPackPlugin;
 import org.elasticsearch.xpack.XPackSettings;
@@ -551,19 +552,6 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
         for (Map.Entry<String, Settings> tribeSettings : tribesSettings.entrySet()) {
             String tribePrefix = "tribe." + tribeSettings.getKey() + ".";
-            // we copy over existing mandatory plugins under additional settings, as they would get overridden
-            // otherwise (arrays don't get merged)
-            String[] existingMandatoryPlugins = tribeSettings.getValue().getAsArray("plugin.mandatory", null);
-            if (existingMandatoryPlugins == null) {
-                //x-pack is mandatory on every tribe if installed and enabled on the tribe node
-                settingsBuilder.putArray(tribePrefix + "plugin.mandatory", XPackPlugin.NAME);
-            } else {
-                if (Arrays.binarySearch(existingMandatoryPlugins, XPackPlugin.NAME) < 0) {
-                    throw new IllegalStateException("when [plugin.mandatory] is explicitly configured, [" +
-                            XPackPlugin.NAME + "] must be included in this list");
-                }
-            }
             final String tribeEnabledSetting = tribePrefix + XPackSettings.SECURITY_ENABLED.getKey();
             if (settings.get(tribeEnabledSetting) != null) {
                 boolean enabled = XPackSettings.SECURITY_ENABLED.get(tribeSettings.getValue());
@@ -584,6 +572,19 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
                 }
             }
         }
+        Map<String, Settings> realmsSettings = settings.getGroups(setting("authc.realms"), true);
+        final boolean hasNativeRealm = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings) ||
+                realmsSettings.isEmpty() ||
+                realmsSettings.entrySet().stream()
+                        .anyMatch((e) -> NativeRealm.TYPE.equals(e.getValue().get("type")) && e.getValue().getAsBoolean("enabled", true));
+        if (hasNativeRealm) {
+            if (TribeService.ON_CONFLICT_SETTING.get(settings).startsWith("prefer_") == false) {
+                throw new IllegalArgumentException("use of security on tribe nodes requires setting [tribe.on_conflict] to specify the " +
+                        "name of the tribe to prefer such as [prefer_t1] as the security index can exist in multiple tribes but only one" +
+                        " can be used by the tribe node");
+            }
+        }
     }
     public static String settingPrefix() {

File: NativeUsersStore.java

@@ -102,6 +102,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
     private final Hasher hasher = Hasher.BCRYPT;
     private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZED);
     private final InternalClient client;
+    private final boolean isTribeNode;
     private int scrollSize;
     private TimeValue scrollKeepAlive;
@@ -110,6 +111,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
     public NativeUsersStore(Settings settings, InternalClient client) {
         super(settings);
         this.client = client;
+        this.isTribeNode = settings.getGroups("tribe", true).isEmpty() == false;
     }
     /**
@@ -305,6 +307,10 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
     public void changePassword(final ChangePasswordRequest request, final ActionListener<Void> listener) {
         final String username = request.username();
         assert SystemUser.NAME.equals(username) == false && XPackUser.NAME.equals(username) == false : username + "is internal!";
+        if (isTribeNode) {
+            listener.onFailure(new UnsupportedOperationException("users may not be created or modified using a tribe node"));
+            return;
+        }
         final String docType;
         if (ReservedRealm.isReserved(username, settings)) {
@@ -373,6 +379,9 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
         if (state() != State.STARTED) {
             listener.onFailure(new IllegalStateException("user cannot be added as native user service has not been started"));
             return;
+        } else if (isTribeNode) {
+            listener.onFailure(new UnsupportedOperationException("users may not be created or modified using a tribe node"));
+            return;
         }
         try {
@@ -459,6 +468,9 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
         if (state() != State.STARTED) {
             listener.onFailure(new IllegalStateException("enabled status cannot be changed as native user service has not been started"));
             return;
+        } else if (isTribeNode) {
+            listener.onFailure(new UnsupportedOperationException("users may not be created or modified using a tribe node"));
+            return;
         }
         if (ReservedRealm.isReserved(username, settings)) {
@@ -530,6 +542,9 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
         if (state() != State.STARTED) {
             listener.onFailure(new IllegalStateException("user cannot be deleted as native user service has not been started"));
             return;
+        } else if (isTribeNode) {
+            listener.onFailure(new UnsupportedOperationException("users may not be deleted using a tribe node"));
+            return;
         }
         try {
@@ -568,6 +583,10 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
             return false;
         }
+        if (isTribeNode) {
+            return true;
+        }
         if (securityIndexMappingAndTemplateUpToDate(clusterState, logger) == false) {
             return false;
         }

File: NativeRolesStore.java

@@ -28,6 +28,7 @@ import org.elasticsearch.action.search.SearchScrollRequest;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.cache.Cache;
@@ -108,6 +109,7 @@ public class NativeRolesStore extends AbstractComponent implements RolesStore, C
     private final InternalClient client;
     private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZED);
+    private final boolean isTribeNode;
     private final Cache<String, RoleAndVersion> roleCache;
     // the lock is used in an odd manner; when iterating over the cache we cannot have modifiers other than deletes using
     // the iterator but when not iterating we can modify the cache without external locking. When making normal modifications to the cache
@@ -135,6 +137,7 @@ public class NativeRolesStore extends AbstractComponent implements RolesStore, C
                 .setMaximumWeight(CACHE_SIZE_SETTING.get(settings))
                 .setExpireAfterWrite(CACHE_TTL_SETTING.get(settings).getMillis())
                 .build();
+        this.isTribeNode = settings.getGroups("tribe", true).isEmpty() == false;
     }
     public boolean canStart(ClusterState clusterState, boolean master) {
@@ -149,7 +152,28 @@ public class NativeRolesStore extends AbstractComponent implements RolesStore, C
             logger.debug("native roles store waiting until gateway has recovered from disk");
             return false;
         }
-        return securityIndexMappingAndTemplateUpToDate(clusterState, logger);
+        if (isTribeNode) {
+            return true;
+        }
+        if (securityIndexMappingAndTemplateUpToDate(clusterState, logger) == false) {
+            return false;
+        }
+        IndexMetaData metaData = clusterState.metaData().index(SecurityTemplateService.SECURITY_INDEX_NAME);
+        if (metaData == null) {
+            logger.debug("security index [{}] does not exist, so service can start", SecurityTemplateService.SECURITY_INDEX_NAME);
+            return true;
+        }
+        if (clusterState.routingTable().index(SecurityTemplateService.SECURITY_INDEX_NAME).allPrimaryShardsActive()) {
+            logger.debug("security index [{}] all primary shards started, so service can start",
+                    SecurityTemplateService.SECURITY_INDEX_NAME);
+            securityIndexExists = true;
+            return true;
+        }
+        return false;
     }
     public void start() {
@@ -261,7 +285,11 @@ public class NativeRolesStore extends AbstractComponent implements RolesStore, C
         if (state() != State.STARTED) {
             logger.trace("attempted to delete role [{}] before service was started", deleteRoleRequest.name());
             listener.onResponse(false);
+        } else if (isTribeNode) {
+            listener.onFailure(new UnsupportedOperationException("roles may not be deleted using a tribe node"));
+            return;
         }
         try {
             DeleteRequest request = client.prepareDelete(SecurityTemplateService.SECURITY_INDEX_NAME,
                     ROLE_DOC_TYPE, deleteRoleRequest.name()).request();
@@ -291,7 +319,11 @@ public class NativeRolesStore extends AbstractComponent implements RolesStore, C
         if (state() != State.STARTED) {
             logger.trace("attempted to put role [{}] before service was started", request.name());
             listener.onResponse(false);
+        } else if (isTribeNode) {
+            listener.onFailure(new UnsupportedOperationException("roles may not be created or modified using a tribe node"));
+            return;
         }
         try {
             client.prepareIndex(SecurityTemplateService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, role.getName())
                     .setSource(role.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS))

File: SecuritySettingsTests.java

@@ -5,8 +5,6 @@
  */
 package org.elasticsearch.xpack.security;
-import java.io.IOException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
@@ -14,11 +12,10 @@ import org.elasticsearch.xpack.XPackPlugin;
 import org.elasticsearch.xpack.XPackSettings;
 import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.Matchers.arrayContaining;
 import static org.hamcrest.Matchers.not;
@@ -27,48 +24,12 @@ public class SecuritySettingsTests extends ESTestCase {
     private static final String TRIBE_T1_SECURITY_ENABLED = "tribe.t1." + XPackSettings.SECURITY_ENABLED.getKey();
     private static final String TRIBE_T2_SECURITY_ENABLED = "tribe.t2." + XPackSettings.SECURITY_ENABLED.getKey();
-    public void testSecurityIsMandatoryOnTribes() throws IOException {
-        Settings settings = Settings.builder().put("tribe.t1.cluster.name", "non_existing")
-                .put("tribe.t2.cluster.name", "non_existing").build();
-        Settings additionalSettings = Security.additionalSettings(settings, false);
-        assertThat(additionalSettings.getAsArray("tribe.t1.plugin.mandatory", null), arrayContaining(XPackPlugin.NAME));
-        assertThat(additionalSettings.getAsArray("tribe.t2.plugin.mandatory", null), arrayContaining(XPackPlugin.NAME));
-    }
-    public void testAdditionalMandatoryPluginsOnTribes() {
-        Settings settings = Settings.builder().put("tribe.t1.cluster.name", "non_existing")
-                .putArray("tribe.t1.plugin.mandatory", "test_plugin").build();
-        //simulate what PluginsService#updatedSettings does to make sure we don't override existing mandatory plugins
-        try {
-            Settings.builder().put(settings).put(Security.additionalSettings(settings, false)).build();
-            fail("security cannot change the value of a setting that is already defined, so a exception should be thrown");
-        } catch (IllegalStateException e) {
-            assertThat(e.getMessage(), containsString(XPackPlugin.NAME));
-            assertThat(e.getMessage(), containsString("plugin.mandatory"));
-        }
-    }
-    public void testMandatoryPluginsOnTribesSecurityAlreadyMandatory() {
-        Settings settings = Settings.builder().put("tribe.t1.cluster.name", "non_existing")
-                .putArray("tribe.t1.plugin.mandatory", "test_plugin", XPackPlugin.NAME).build();
-        //simulate what PluginsService#updatedSettings does to make sure we don't override existing mandatory plugins
-        Settings finalSettings = Settings.builder().put(settings).put(Security.additionalSettings(settings, false)).build();
-        String[] finalMandatoryPlugins = finalSettings.getAsArray("tribe.t1.plugin.mandatory", null);
-        assertThat(finalMandatoryPlugins, notNullValue());
-        assertThat(finalMandatoryPlugins.length, equalTo(2));
-        assertThat(finalMandatoryPlugins[0], equalTo("test_plugin"));
-        assertThat(finalMandatoryPlugins[1], equalTo(XPackPlugin.NAME));
-    }
     public void testSecurityIsEnabledByDefaultOnTribes() {
-        Settings settings = Settings.builder().put("tribe.t1.cluster.name", "non_existing")
-                .put("tribe.t2.cluster.name", "non_existing").build();
+        Settings settings = Settings.builder()
+                .put("tribe.t1.cluster.name", "non_existing")
+                .put("tribe.t2.cluster.name", "non_existing2")
+                .put("tribe.on_conflict", "prefer_t1")
+                .build();
         Settings additionalSettings = Security.additionalSettings(settings, false);
@@ -107,6 +68,7 @@ public class SecuritySettingsTests extends ESTestCase {
         Settings settings = Settings.builder()
                 .put("tribe.t1.cluster.name", "non_existing")
                 .put("tribe.t2.cluster.name", "non_existing")
+                .put("tribe.on_conflict", "prefer_" + randomFrom("t1", "t2"))
                 .put("xpack.security.foo", "bar")
                 .put("xpack.security.bar", "foo")
                 .putArray("xpack.security.something.else.here", new String[] { "foo", "bar" })
@@ -123,6 +85,56 @@ public class SecuritySettingsTests extends ESTestCase {
         assertThat(additionalSettings.get("tribe.t2.xpack.security.foo"), is("bar"));
         assertThat(additionalSettings.get("tribe.t2.xpack.security.bar"), is("foo"));
         assertThat(additionalSettings.getAsArray("tribe.t2.xpack.security.something.else.here"), arrayContaining("foo", "bar"));
+        assertThat(additionalSettings.get("tribe.on_conflict"), nullValue());
+        assertThat(additionalSettings.get("tribe.t1.on_conflict"), nullValue());
+        assertThat(additionalSettings.get("tribe.t2.on_conflict"), nullValue());
+    }
+    public void testOnConflictMustBeSetOnTribe() {
+        final Settings settings = Settings.builder()
+                .put("tribe.t1.cluster.name", "non_existing")
+                .put("tribe.t2.cluster.name", "non_existing2")
+                .build();
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> Security.additionalSettings(settings, false));
+        assertThat(e.getMessage(), containsString("tribe.on_conflict"));
+        final Settings badOnConflict = Settings.builder().put(settings).put("tribe.on_conflict", randomFrom("any", "drop")).build();
+        e = expectThrows(IllegalArgumentException.class, () -> Security.additionalSettings(badOnConflict, false));
+        assertThat(e.getMessage(), containsString("tribe.on_conflict"));
+        Settings goodOnConflict = Settings.builder().put(settings).put("tribe.on_conflict", "prefer_" + randomFrom("t1", "t2")).build();
+        Settings additionalSettings = Security.additionalSettings(goodOnConflict, false);
+        assertNotNull(additionalSettings);
+    }
+    public void testOnConflictWithNoNativeRealms() {
+        final Settings noNative = Settings.builder()
+                .put("tribe.t1.cluster.name", "non_existing")
+                .put("tribe.t2.cluster.name", "non_existing2")
+                .put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false)
+                .put("xpack.security.authc.realms.foo.type", randomFrom("ldap", "pki", randomAsciiOfLengthBetween(1, 6)))
+                .build();
+        Settings additionalSettings = Security.additionalSettings(noNative, false);
+        assertNotNull(additionalSettings);
+        // still with the reserved realm
+        final Settings withReserved = Settings.builder()
+                .put("tribe.t1.cluster.name", "non_existing")
+                .put("tribe.t2.cluster.name", "non_existing2")
+                .put("xpack.security.authc.realms.foo.type", randomFrom("ldap", "pki", randomAsciiOfLengthBetween(1, 6)))
+                .build();
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> Security.additionalSettings(withReserved, false));
+        assertThat(e.getMessage(), containsString("tribe.on_conflict"));
+        // reserved disabled but no realms defined
+        final Settings reservedDisabled = Settings.builder()
+                .put("tribe.t1.cluster.name", "non_existing")
+                .put("tribe.t2.cluster.name", "non_existing2")
+                .put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false)
+                .build();
+        e = expectThrows(IllegalArgumentException.class, () -> Security.additionalSettings(reservedDisabled, false));
+        assertThat(e.getMessage(), containsString("tribe.on_conflict"));
     }
     public void testValidAutoCreateIndex() {

File: SecurityTribeIT.java (new file)

@@ -0,0 +1,309 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security;
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.node.MockNode;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.NativeRealmIntegTestCase;
+import org.elasticsearch.test.SecuritySettingsSource;
+import org.elasticsearch.xpack.security.action.role.GetRolesResponse;
+import org.elasticsearch.xpack.security.action.role.PutRoleResponse;
+import org.elasticsearch.xpack.security.action.user.PutUserResponse;
+import org.elasticsearch.xpack.security.authc.support.SecuredString;
+import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
+import org.elasticsearch.xpack.security.client.SecurityClient;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.hamcrest.Matchers.arrayContaining;
+import static org.hamcrest.Matchers.containsString;
+/**
+ * Tests security with tribe nodes
+ */
+public class SecurityTribeIT extends NativeRealmIntegTestCase {
+    private static final String SECOND_CLUSTER_NODE_PREFIX = "node_cluster2_";
+    private static InternalTestCluster cluster2;
+    private static boolean useSSL;
+    private Node tribeNode;
+    private Client tribeClient;
+    @BeforeClass
+    public static void setupSSL() {
+        useSSL = randomBoolean();
+    }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        if (cluster2 == null) {
+            SecuritySettingsSource cluster2SettingsSource =
+                    new SecuritySettingsSource(defaultMaxNumberOfNodes(), useSSL, systemKey(), createTempDir(), Scope.SUITE);
+            cluster2 = new InternalTestCluster(randomLong(), createTempDir(), true, 1, 2,
+                    UUIDs.randomBase64UUID(random()), cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(),
+                    getClientWrapper());
+            cluster2.beforeTest(random(), 0.1);
+            cluster2.ensureAtLeastNumDataNodes(2);
+        }
+    }
+    @AfterClass
+    public static void tearDownSecondCluster() {
+        if (cluster2 != null) {
+            try {
+                cluster2.close();
+            } finally {
+                cluster2 = null;
+            }
+        }
+    }
+    /**
+     * We intentionally do not override {@link ESIntegTestCase#tearDown()} as doing so causes the ensure cluster size check to timeout
+     */
+    @After
+    public void tearDownTribeNodeAndWipeCluster() throws Exception {
+        if (cluster2 != null) {
+            try {
+                cluster2.wipe(Collections.<String>emptySet());
+                try {
+                    // this is a hack to clean up the .security index since only the XPack user or superusers can delete it
+                    cluster2.getInstance(InternalClient.class)
+                            .admin().indices().prepareDelete(SecurityTemplateService.SECURITY_INDEX_NAME).get();
+                } catch (IndexNotFoundException e) {
+                    // ignore it since not all tests create this index...
+                }
+                // Clear the realm cache for all realms since we use a SUITE scoped cluster
+                SecurityClient client = securityClient(cluster2.client());
+                client.prepareClearRealmCache().get();
+            } finally {
+                cluster2.afterTest();
+            }
+        }
+        if (tribeNode != null) {
+            tribeNode.close();
+            tribeNode = null;
+        }
+    }
+    @Override
+    public boolean sslTransportEnabled() {
+        return useSSL;
+    }
+    @Override
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+    private void setupTribeNode(Settings settings) throws NodeValidationException {
+        SecuritySettingsSource cluster2SettingsSource = new SecuritySettingsSource(1, useSSL, systemKey(), createTempDir(), Scope.TEST);
+        Map<String,String> asMap = new HashMap<>(cluster2SettingsSource.nodeSettings(0).getAsMap());
+        asMap.remove(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey());
+        Settings.Builder tribe1Defaults = Settings.builder();
+        Settings.Builder tribe2Defaults = Settings.builder();
+        for (Map.Entry<String, String> entry : asMap.entrySet()) {
+            if (entry.getKey().startsWith("path.")) {
+                continue;
+            } else if (entry.getKey().equals("transport.tcp.port")) {
+                continue;
+            }
+            tribe1Defaults.put("tribe.t1." + entry.getKey(), entry.getValue());
+            tribe2Defaults.put("tribe.t2." + entry.getKey(), entry.getValue());
+        }
+        // give each tribe its unicast hosts to connect to
+        tribe1Defaults.putArray("tribe.t1." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(),
+                getUnicastHosts(internalCluster().client()));
+        tribe2Defaults.putArray("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(),
+                getUnicastHosts(cluster2.client()));
+        Settings merged = Settings.builder()
+                .put(internalCluster().getDefaultSettings())
+                .put(asMap)
+                .put("tribe.t1.cluster.name", internalCluster().getClusterName())
+                .put("tribe.t2.cluster.name", cluster2.getClusterName())
+                .put("tribe.blocks.write", false)
+                .put("tribe.on_conflict", "prefer_t1")
+                .put(tribe1Defaults.build())
+                .put(tribe2Defaults.build())
+                .put(settings)
+                .put("node.name", "tribe_node") // make sure we can identify threads from this node
+                .build();
+        tribeNode = new MockNode(merged, nodePlugins()).start();
+        tribeClient = tribeNode.client();
+    }
+    private String[] getUnicastHosts(Client client) {
+        ArrayList<String> unicastHosts = new ArrayList<>();
+        NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setTransport(true).get();
+        for (NodeInfo info : nodeInfos.getNodes()) {
+            TransportAddress address = info.getTransport().getAddress().publishAddress();
+            unicastHosts.add(address.getAddress() + ":" + address.getPort());
+        }
+        return unicastHosts.toArray(new String[unicastHosts.size()]);
+    }
+    public void testThatTribeCanAuthenticateElasticUser() throws Exception {
+        setupTribeNode(Settings.EMPTY);
+        ClusterHealthResponse response = tribeClient.filterWithHeader(Collections.singletonMap("Authorization",
+                UsernamePasswordToken.basicAuthHeaderValue("elastic", new SecuredString("changeme".toCharArray()))))
+                .admin().cluster().prepareHealth().get();
+        assertNoTimeout(response);
+    }
+    public void testThatTribeCanAuthenticateElasticUserWithChangedPassword() throws Exception {
+        setupTribeNode(Settings.EMPTY);
+        Client clusterClient = randomBoolean() ? client() : cluster2.client();
+        securityClient(clusterClient).prepareChangePassword("elastic", "password".toCharArray()).get();
+        ClusterHealthResponse response = tribeClient.filterWithHeader(Collections.singletonMap("Authorization",
+                UsernamePasswordToken.basicAuthHeaderValue("elastic", new SecuredString("password".toCharArray()))))
+                .admin().cluster().prepareHealth().get();
+        assertNoTimeout(response);
+    }
+    public void testThatTribeClustersHaveDifferentPasswords() throws Exception {
+        setupTribeNode(Settings.EMPTY);
+        securityClient().prepareChangePassword("elastic", "password".toCharArray()).get();
+        securityClient(cluster2.client()).prepareChangePassword("elastic", "password2".toCharArray()).get();
+        ClusterHealthResponse response = tribeClient.filterWithHeader(Collections.singletonMap("Authorization",
+                UsernamePasswordToken.basicAuthHeaderValue("elastic", new SecuredString("password".toCharArray()))))
+                .admin().cluster().prepareHealth().get();
+        assertNoTimeout(response);
+    }
+    public void testUsersInBothTribes() throws Exception {
+        final String preferredTribe = randomBoolean() ? "t1" : "t2";
+        setupTribeNode(Settings.builder().put("tribe.on_conflict", "prefer_" + preferredTribe).build());
+        final int randomUsers = scaledRandomIntBetween(3, 8);
+        final Client cluster1Client = client();
+        final Client cluster2Client = cluster2.client();
+        List<String> shouldBeSuccessfulUsers = new ArrayList<>();
+        List<String> shouldFailUsers = new ArrayList<>();
+        final Client preferredClient = "t1".equals(preferredTribe) ? cluster1Client : cluster2Client;
+        for (int i = 0; i < randomUsers; i++) {
+            final String username = "user" + i;
+            Client clusterClient = randomBoolean() ? cluster1Client : cluster2Client;
+            PutUserResponse response =
+                    securityClient(clusterClient).preparePutUser(username, "password".toCharArray(), "superuser").get();
+            assertTrue(response.created());
+            // if it was the first client, we should expect authentication to succeed
+            if (preferredClient == clusterClient) {
+                shouldBeSuccessfulUsers.add(username);
+            } else {
+                shouldFailUsers.add(username);
+            }
+        }
+        for (String username : shouldBeSuccessfulUsers) {
+            ClusterHealthResponse response = tribeClient.filterWithHeader(Collections.singletonMap("Authorization",
+                    UsernamePasswordToken.basicAuthHeaderValue(username, new SecuredString("password".toCharArray()))))
+                    .admin().cluster().prepareHealth().get();
+            assertNoTimeout(response);
+        }
+        for (String username : shouldFailUsers) {
+            ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () ->
+                    tribeClient.filterWithHeader(Collections.singletonMap("Authorization",
+                            UsernamePasswordToken.basicAuthHeaderValue(username, new SecuredString("password".toCharArray()))))
+                            .admin().cluster().prepareHealth().get());
+            assertThat(e.getMessage(), containsString("authenticate"));
+        }
+    }
+    public void testUserModificationUsingTribeNodeAreDisabled() throws Exception {
+        setupTribeNode(Settings.EMPTY);
+        SecurityClient securityClient = securityClient(getClientWrapper().apply(tribeClient));
+        UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class,
+                () -> securityClient.preparePutUser("joe", "password".toCharArray()).get());
+        assertThat(e.getMessage(), containsString("users may not be created or modified using a tribe node"));
+        e = expectThrows(UnsupportedOperationException.class, () -> securityClient.prepareSetEnabled("elastic", randomBoolean()).get());
+        assertThat(e.getMessage(), containsString("users may not be created or modified using a tribe node"));
+        e = expectThrows(UnsupportedOperationException.class,
+                () -> securityClient.prepareChangePassword("elastic", "password".toCharArray()).get());
+        assertThat(e.getMessage(), containsString("users may not be created or modified using a tribe node"));
+        e = expectThrows(UnsupportedOperationException.class, () -> securityClient.prepareDeleteUser("joe").get());
+        assertThat(e.getMessage(), containsString("users may not be deleted using a tribe node"));
+    }
+    public void testRetrieveRolesOnTribeNode() throws Exception {
+        final String preferredTribe = randomBoolean() ? "t1" : "t2";
+        setupTribeNode(Settings.builder().put("tribe.on_conflict", "prefer_" + preferredTribe).build());
+        final int randomRoles = scaledRandomIntBetween(3, 8);
+        final Client cluster1Client = client();
+        final Client cluster2Client = cluster2.client();
+        List<String> shouldBeSuccessfulRoles = new ArrayList<>();
+        List<String> shouldFailRoles = new ArrayList<>();
+        final Client preferredClient = "t1".equals(preferredTribe) ? cluster1Client : cluster2Client;
+        for (int i = 0; i < randomRoles; i++) {
+            final String rolename = "role" + i;
+            Client clusterClient = randomBoolean() ? cluster1Client : cluster2Client;
+            PutRoleResponse response = securityClient(clusterClient).preparePutRole(rolename).cluster("monitor").get();
+            assertTrue(response.isCreated());
+            // if the role was created on the preferred tribe's cluster, we expect it to be visible through the tribe node
+            if (preferredClient == clusterClient) {
+                shouldBeSuccessfulRoles.add(rolename);
+            } else {
+                shouldFailRoles.add(rolename);
+            }
+        }
+        SecurityClient securityClient = securityClient(getClientWrapper().apply(tribeClient));
+        for (String rolename : shouldBeSuccessfulRoles) {
+            GetRolesResponse response = securityClient.prepareGetRoles(rolename).get();
+            assertTrue(response.hasRoles());
+            assertEquals(1, response.roles().length);
+            assertThat(response.roles()[0].getClusterPrivileges(), arrayContaining("monitor"));
+        }
+        for (String rolename : shouldFailRoles) {
+            GetRolesResponse response = securityClient.prepareGetRoles(rolename).get();
+            assertFalse(response.hasRoles());
+        }
+    }
+    public void testRoleModificationUsingTribeNodeAreDisabled() throws Exception {
+        setupTribeNode(Settings.EMPTY);
+        SecurityClient securityClient = securityClient(getClientWrapper().apply(tribeClient));
+        UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class,
+                () -> securityClient.preparePutRole("role").cluster("all").get());
+        assertThat(e.getMessage(), containsString("roles may not be created or modified using a tribe node"));
+        e = expectThrows(UnsupportedOperationException.class, () -> securityClient.prepareDeleteRole("role").get());
+        assertThat(e.getMessage(), containsString("roles may not be deleted using a tribe node"));
+    }
+}