Add socket permissions for tribe nodes
Today when a node starts, we create dynamic socket permissions based on the configured HTTP ports and transport ports. If no ports are configured, we use the default port ranges. When a tribe node starts, it creates an internal node client for connecting to each remote cluster. If neither an explicit HTTP port nor explicit transport ports were specified, the default port ranges are large enough to cover both the tribe node and its internal node clients. But if an explicit HTTP port or transport port was specified for the tribe node, socket permissions are created only for those ports and not for the internal node clients. Whether the internal node clients have explicit ports of their own or fall back to the default ranges, no socket permissions have been created for them, so they hit a permissions issue when attempting to bind.

This commit addresses this issue by also accounting for tribe nodes when creating the dynamic socket permissions. Additionally, we add our first real integration test for tribe nodes.

Relates #21546
commit 48579cccab
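To make the failure mode concrete: the following is a minimal sketch using only JDK classes (not Elasticsearch code; the class name and port values are illustrative). Under the security manager, a server socket may only bind if some granted SocketPermission implies the requested "listen" action on that port, so a grant that covers only the tribe node's own explicit range leaves an internal node client's range uncovered.

import java.net.SocketPermission;
import java.security.Permissions;

// Sketch only: shows how a grant for one port range fails to cover a bind outside that range.
public class SocketPermissionSketch {
    public static void main(String[] args) {
        Permissions policy = new Permissions();
        // grant only the tribe node's explicitly configured transport range
        policy.add(new SocketPermission("*:40300-40349", "listen,resolve"));

        // the tribe node itself can bind within its configured range...
        System.out.println(policy.implies(new SocketPermission("localhost:40310", "listen"))); // true
        // ...but an internal node client falling back to the default range (e.g. 9300-9400) is denied
        System.out.println(policy.implies(new SocketPermission("localhost:9301", "listen")));  // false
    }
}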
@@ -123,7 +123,7 @@ class ClusterConfiguration {
 
     Map<String, String> systemProperties = new HashMap<>()
 
-    Map<String, String> settings = new HashMap<>()
+    Map<String, Object> settings = new HashMap<>()
 
     // map from destination path, to source file
     Map<String, Object> extraConfigFiles = new HashMap<>()
@@ -140,7 +140,7 @@ class ClusterConfiguration {
     }
 
     @Input
-    void setting(String name, String value) {
+    void setting(String name, Object value) {
         settings.put(name, value)
     }
 
@@ -20,10 +20,10 @@
 package org.elasticsearch.bootstrap;
 
 import org.elasticsearch.SecureSM;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.http.HttpTransportSettings;
@@ -266,12 +266,14 @@ final class Security {
         }
     }
 
-    static void addBindPermissions(Permissions policy, Settings settings) throws IOException {
-        // http is simple
-        String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
-        // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
-        // see SocketPermission implies() code
-        policy.add(new SocketPermission("*:" + httpRange, "listen,resolve"));
+    /**
+     * Add dynamic {@link SocketPermission}s based on HTTP and transport settings.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
+     * @param settings the {@link Settings} instance to read the HTTP and transport settings from
+     */
+    static void addBindPermissions(Permissions policy, Settings settings) {
+        addSocketPermissionForHttp(policy, settings);
         // transport is waaaay overengineered
         Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();
         if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {
@@ -284,16 +286,76 @@ final class Security {
         for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
             Settings profileSettings = entry.getValue();
             String name = entry.getKey();
-            String transportRange = profileSettings.get("port", TransportSettings.PORT.get(settings));
 
             // a profile is only valid if its the default profile, or if it has an actual name and specifies a port
             boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
             if (valid) {
-                // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
-                // see SocketPermission implies() code
-                policy.add(new SocketPermission("*:" + transportRange, "listen,resolve"));
+                addSocketPermissionForTransportProfile(policy, profileSettings, settings);
             }
         }
+
+        for (final Settings tribeNodeSettings : settings.getGroups("tribe", true).values()) {
+            // tribe nodes have HTTP disabled by default, so we check if HTTP is enabled before granting
+            if (NetworkModule.HTTP_ENABLED.exists(tribeNodeSettings) && NetworkModule.HTTP_ENABLED.get(tribeNodeSettings)) {
+                addSocketPermissionForHttp(policy, tribeNodeSettings);
+            }
+            addSocketPermissionForTransport(policy, tribeNodeSettings);
+        }
     }
 
+    /**
+     * Add dynamic {@link SocketPermission} based on HTTP settings.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
+     * @param settings the {@link Settings} instance to read the HTTP settings from
+     */
+    private static void addSocketPermissionForHttp(final Permissions policy, final Settings settings) {
+        // http is simple
+        final String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
+        addSocketPermissionForPortRange(policy, httpRange);
+    }
+
+    /**
+     * Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in
+     * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to
+     * @param profileSettings the {@link Settings} to read the transport profile from
+     * @param settings the {@link Settings} instance to read the transport settings from
+     */
+    private static void addSocketPermissionForTransportProfile(
+        final Permissions policy,
+        final Settings profileSettings,
+        final Settings settings) {
+        final String transportRange = profileSettings.get("port");
+        if (transportRange != null) {
+            addSocketPermissionForPortRange(policy, transportRange);
+        } else {
+            addSocketPermissionForTransport(policy, settings);
+        }
+    }
+
+    /**
+     * Add dynamic {@link SocketPermission} based on transport settings.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to
+     * @param settings the {@link Settings} instance to read the transport settings from
+     */
+    private static void addSocketPermissionForTransport(final Permissions policy, final Settings settings) {
+        final String transportRange = TransportSettings.PORT.get(settings);
+        addSocketPermissionForPortRange(policy, transportRange);
+    }
+
+    /**
+     * Add dynamic {@link SocketPermission} for the specified port range.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission} to.
+     * @param portRange the port range
+     */
+    private static void addSocketPermissionForPortRange(final Permissions policy, final String portRange) {
+        // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
+        // see SocketPermission implies() code
+        policy.add(new SocketPermission("*:" + portRange, "listen,resolve"));
+    }
+
     /**
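The tribe handling above iterates over settings.getGroups("tribe", true). The following is a minimal sketch of what that call yields, assuming the Elasticsearch 5.x Settings API used elsewhere in this diff; the class name, main method, and port values are illustrative.

import java.util.Map;

import org.elasticsearch.common.settings.Settings;

// Sketch only: one Settings group is produced per remote cluster, keyed by its alias, so the
// existing per-node helpers can be reused to grant socket permissions for each internal client.
public class TribeGroupsSketch {
    public static void main(String[] args) {
        final Settings settings = Settings.builder()
                .put("transport.tcp.port", "40300-40349")            // the tribe node itself
                .put("tribe.one.transport.tcp.port", "40350-40399")  // internal client for cluster "one"
                .put("tribe.two.transport.tcp.port", "40250-40399")  // internal client for cluster "two"
                .build();

        // groups look like regular node settings with the "tribe.<name>." prefix stripped
        final Map<String, Settings> tribeGroups = settings.getGroups("tribe", true);
        for (final Map.Entry<String, Settings> entry : tribeGroups.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue().get("transport.tcp.port"));
        }
        // prints: one -> 40350-40399
        //         two -> 40250-40399
    }
}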
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.test.ClusterConfiguration
+import org.elasticsearch.gradle.test.ClusterFormationTasks
+import org.elasticsearch.gradle.test.NodeInfo
+
+apply plugin: 'elasticsearch.rest-test'
+
+List<NodeInfo> oneNodes
+
+task setupClusterOne(type: DefaultTask) {
+    mustRunAfter(precommit)
+    ClusterConfiguration configOne = new ClusterConfiguration(project)
+    configOne.clusterName = 'one'
+    configOne.setting('node.name', 'one')
+    oneNodes = ClusterFormationTasks.setup(project, setupClusterOne, configOne)
+}
+
+List<NodeInfo> twoNodes
+
+task setupClusterTwo(type: DefaultTask) {
+    mustRunAfter(precommit)
+    ClusterConfiguration configTwo = new ClusterConfiguration(project)
+    configTwo.clusterName = 'two'
+    configTwo.setting('node.name', 'two')
+    twoNodes = ClusterFormationTasks.setup(project, setupClusterTwo, configTwo)
+}
+
+integTest {
+    dependsOn(setupClusterOne, setupClusterTwo)
+    cluster {
+        // tribe nodes had a bug where if explicit ports were specified for the tribe node, the dynamic socket permissions that were applied
+        // would not account for the fact that the internal node clients needed to bind to sockets too; thus, we use explicit port ranges to
+        // ensure that the code that fixes this bug is exercised
+        setting 'http.port', '40200-40249'
+        setting 'transport.tcp.port', '40300-40349'
+        setting 'node.name', 'quest'
+        setting 'tribe.one.cluster.name', 'one'
+        setting 'tribe.one.discovery.zen.ping.unicast.hosts', "'${-> oneNodes.get(0).transportUri()}'"
+        setting 'tribe.one.http.enabled', 'true'
+        setting 'tribe.one.http.port', '40250-40299'
+        setting 'tribe.one.transport.tcp.port', '40350-40399'
+        setting 'tribe.two.cluster.name', 'two'
+        setting 'tribe.two.discovery.zen.ping.unicast.hosts', "'${-> twoNodes.get(0).transportUri()}'"
+        setting 'tribe.two.http.enabled', 'true'
+        setting 'tribe.two.http.port', '40250-40299'
+        setting 'tribe.two.transport.tcp.port', '40250-40399'
+    }
+    // need to kill the standalone nodes here
+    finalizedBy 'setupClusterOne#stop'
+    finalizedBy 'setupClusterTwo#stop'
+}
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;
+
+import java.io.IOException;
+
+public class TribeClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+    // tribe nodes can not handle delete indices requests
+    @Override
+    protected boolean preserveIndicesUponCompletion() {
+        return true;
+    }
+
+    // tribe nodes can not handle delete template requests
+    @Override
+    protected boolean preserveTemplatesUponCompletion() {
+        return true;
+    }
+
+    public TribeClientYamlTestSuiteIT(@Name("yaml") final ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
+        return createParameters();
+    }
+
+}
@@ -0,0 +1,16 @@
+---
+"Tribe node test":
+  - do:
+      cat.nodes:
+        h: name
+        s: name
+        v: true
+
+  - match:
+      $body: |
+        /^ name\n
+           one\n
+           quest\n
+           quest/one\n
+           quest/two\n
+           two\n $/
@@ -58,12 +58,13 @@ List projects = [
   'qa:evil-tests',
   'qa:rolling-upgrade',
   'qa:smoke-test-client',
+  'qa:smoke-test-http',
   'qa:smoke-test-ingest-with-all-dependencies',
   'qa:smoke-test-ingest-disabled',
   'qa:smoke-test-multinode',
   'qa:smoke-test-plugins',
   'qa:smoke-test-reindex-with-painless',
-  'qa:smoke-test-http',
+  'qa:smoke-test-tribe-node',
   'qa:vagrant',
 ]
 
@@ -150,6 +150,16 @@ public class ESRestTestCase extends ESTestCase {
         return false;
     }
 
+    /**
+     * Controls whether or not to preserve templates upon completion of this test. The default implementation is to delete not preserve
+     * templates.
+     *
+     * @return whether or not to preserve templates
+     */
+    protected boolean preserveTemplatesUponCompletion() {
+        return false;
+    }
+
     private void wipeCluster() throws IOException {
         if (preserveIndicesUponCompletion() == false) {
             // wipe indices
@@ -164,7 +174,9 @@ public class ESRestTestCase extends ESTestCase {
         }
 
         // wipe index templates
-        adminClient().performRequest("DELETE", "_template/*");
+        if (preserveTemplatesUponCompletion() == false) {
+            adminClient().performRequest("DELETE", "_template/*");
+        }
 
         wipeSnapshots();
     }