Default max local storage nodes to one

This commit defaults the max local storage nodes to one. The motivation
for this change is that a default value greater than one is dangerous:
users sometimes unknowingly start a second node and then think that
they have encountered data loss.

Relates #19964
Jason Tedor 2016-08-12 09:26:20 -04:00 committed by GitHub
parent c9722c4b08
commit 1f0673c9bd
14 changed files with 65 additions and 23 deletions

View File

@@ -261,6 +261,7 @@ class ClusterFormationTasks {
                 'node.attr.testattr'           : 'test',
                 'repositories.url.allowed_urls': 'http://snapshot.test*'
         ]
+        esConfig['node.max_local_storage_nodes'] = node.config.numNodes
         esConfig['http.port'] = node.config.httpPort
         esConfig['transport.tcp.port'] = node.config.transportPort
         esConfig.putAll(node.config.settings)

View File

@@ -68,6 +68,7 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
@@ -151,7 +152,7 @@ public final class NodeEnvironment implements Closeable {
     /**
     * Maximum number of data nodes that should run in an environment.
     */
-    public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1,
+    public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1,
        Property.NodeScope);

    /**
@@ -244,8 +245,15 @@ public final class NodeEnvironment implements Closeable {
        }
        if (locks[0] == null) {
-            throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: "
-                + Arrays.toString(environment.dataWithClusterFiles()), lastException);
+            final String message = String.format(
+                Locale.ROOT,
+                "failed to obtain node locks, tried [%s] with lock id%s;" +
+                    " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?",
+                Arrays.toString(environment.dataWithClusterFiles()),
+                maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]",
+                MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
+                maxLocalStorageNodes);
+            throw new IllegalStateException(message, lastException);
        }

        this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
        this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
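For illustration, a minimal, self-contained sketch (not part of the commit) of what the new lock-failure message looks like with the default of one local storage node; the class name and the data path are hypothetical:

import java.util.Locale;

public class LockMessageDemo {
    public static void main(String[] args) {
        final int maxLocalStorageNodes = 1;                  // the new default
        final String dataPaths = "[/var/lib/elasticsearch]"; // hypothetical data path
        // Mirrors the String.format call added in the hunk above.
        String message = String.format(
            Locale.ROOT,
            "failed to obtain node locks, tried [%s] with lock id%s;" +
                " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?",
            dataPaths,
            maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]",
            "node.max_local_storage_nodes",
            maxLocalStorageNodes);
        System.out.println(message);
        // Prints:
        // failed to obtain node locks, tried [[/var/lib/elasticsearch]] with lock id [0];
        // maybe these locations are not writable or multiple nodes were started without
        // increasing [node.max_local_storage_nodes] (was [1])?
    }
}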

View File

@@ -117,6 +117,9 @@ public class TribeService extends AbstractLifecycleComponent {
        sb.put(Node.NODE_MASTER_SETTING.getKey(), false);
        sb.put(Node.NODE_DATA_SETTING.getKey(), false);
        sb.put(Node.NODE_INGEST_SETTING.getKey(), false);
+        if (!NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.exists(settings)) {
+            sb.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), nodesSettings.size());
+        }
        sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery
        // nothing is going to be discovered, since no master will be elected
        sb.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
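A hedged illustration of the guard above (not part of the commit): a tribe node internally starts one client node per configured tribe, so unless the user set node.max_local_storage_nodes explicitly, it is presumably defaulted to nodesSettings.size(), the number of tribes. The class and cluster names below are hypothetical:

import org.elasticsearch.common.settings.Settings;

public class TribeDefaultDemo {
    public static void main(String[] args) {
        // Hypothetical tribe configuration joining two remote clusters, t1 and t2,
        // without setting node.max_local_storage_nodes explicitly.
        Settings userSettings = Settings.builder()
            .put("tribe.t1.cluster.name", "cluster-one")
            .put("tribe.t2.cluster.name", "cluster-two")
            .build();
        // With two tribes configured, the guard in TribeService would default
        // node.max_local_storage_nodes to 2 for this node; an explicit value in
        // userSettings would be left untouched.
        System.out.println(userSettings.get("tribe.t1.cluster.name"));
    }
}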

View File

@@ -38,6 +38,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.store.Store;
@@ -118,6 +119,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
    protected Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder()
            // manual collection or upon cluster forming.
+            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
            .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s")
            .build();
    }

View File

@@ -55,6 +55,7 @@ import org.elasticsearch.discovery.zen.ping.ZenPing;
 import org.elasticsearch.discovery.zen.ping.ZenPingService;
 import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
 import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
+import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.indices.store.IndicesStoreIntegrationIT;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -207,6 +208,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
        // TODO: Rarely use default settings form some of these
        Settings nodeSettings = Settings.builder()
            .put(settings)
+            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 4)
            .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode)
            .build();

View File

@@ -74,18 +74,14 @@ public class NodeEnvironmentTests extends ESTestCase {
    }

    public void testNodeLockSingleEnvironment() throws IOException {
-        final Settings settings = buildEnvSettings(Settings.builder()
-            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 1).build());
+        final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 1).build());
        NodeEnvironment env = newNodeEnvironment(settings);
        List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);

-        try {
-            // Reuse the same location and attempt to lock again
-            new NodeEnvironment(settings, new Environment(settings));
-            fail("env has already locked all the data directories it is allowed");
-        } catch (IllegalStateException ex) {
-            assertThat(ex.getMessage(), containsString("Failed to obtain node lock"));
-        }
+        // Reuse the same location and attempt to lock again
+        IllegalStateException ex =
+            expectThrows(IllegalStateException.class, () -> new NodeEnvironment(settings, new Environment(settings)));
+        assertThat(ex.getMessage(), containsString("failed to obtain node lock"));

        // Close the environment that holds the lock and make sure we can get the lock after release
        env.close();
@@ -121,7 +117,7 @@ public class NodeEnvironmentTests extends ESTestCase {
    }

    public void testNodeLockMultipleEnvironment() throws IOException {
-        final Settings settings = buildEnvSettings(Settings.EMPTY);
+        final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build());
        final NodeEnvironment first = newNodeEnvironment(settings);
        List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);
        NodeEnvironment second = new NodeEnvironment(settings, new Environment(settings));

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.indices.memory.breaker;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -40,6 +41,7 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase {
    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder()
+            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
            .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop")
            // This is set low, because if the "noop" is not a noop, it will break
            .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")

View File

@@ -39,6 +39,7 @@ import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.discovery.MasterNotDiscoveredException;
 import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
+import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalTestCluster;
@@ -128,6 +129,7 @@ public class TribeIT extends ESIntegTestCase {
        tribe1Defaults.putArray("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(cluster2.client()));
        Settings merged = Settings.builder()
+            .put(internalCluster().getDefaultSettings())
            .put("tribe.t1.cluster.name", internalCluster().getClusterName())
            .put("tribe.t2.cluster.name", cluster2.getClusterName())
            .put("tribe.t1.transport.type", "local")
@@ -142,7 +144,6 @@ public class TribeIT extends ESIntegTestCase {
            .put(tribe1Defaults.build())
            .put(tribe2Defaults.build())
-            .put(internalCluster().getDefaultSettings())
            .put("node.name", "tribe_node") // make sure we can identify threads from this node
            .build();

View File

@@ -310,3 +310,14 @@ The unit 'w' representing weeks is no longer supported.

 Fractional time values (e.g., 0.5s) are no longer supported. For example, this means when setting
 timeouts "0.5s" will be rejected and should instead be input as "500ms".
+
+==== Node max local storage nodes
+
+Previous versions of Elasticsearch defaulted to allowing multiple nodes to share the same data
+directory (up to 50). This can be confusing when users accidentally start multiple nodes and end
+up thinking that they've lost data because the second node will start with an empty data directory.
+While the default of allowing multiple nodes is friendly to playing with forming a small cluster on
+a laptop, and end-users do sometimes run multiple nodes on the same host, this tends to be the
+exception. Keeping with Elasticsearch's continual movement towards safer out-of-the-box defaults,
+and optimizing for the norm instead of the exception, the default for
+`node.max_local_storage_nodes` is now one.
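As a practical note: users who intentionally run more than one node against the same data path (for example, two nodes on one development machine) can restore the old behaviour by raising the limit explicitly, for example by setting `node.max_local_storage_nodes: 2` in `elasticsearch.yml` on each affected node.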

View File

@@ -60,7 +60,9 @@ public class TribeUnitTests extends ESTestCase {
            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
            .put("transport.type", "local")
            .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
-            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
+            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
+            .build();
        tribe1 = new TribeClientNode(
            Settings.builder()

View File

@@ -31,6 +31,7 @@ import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.client.RestClientBuilder;
+import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.transport.MockTcpTransportPlugin;
 import org.elasticsearch.action.ActionListener;
@@ -1630,6 +1631,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
     */
    protected Settings nodeSettings(int nodeOrdinal) {
        Settings.Builder builder = Settings.builder()
+            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE)
            // Default the watermarks to absurdly low to prevent the tests
            // from failing on nodes without enough disk space
            .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")

View File

@@ -175,8 +175,8 @@ public final class InternalTestCluster extends TestCluster {
    public final int HTTP_BASE_PORT = GLOBAL_HTTP_BASE_PORT + CLUSTER_BASE_PORT_OFFSET;

-    static final int DEFAULT_LOW_NUM_MASTER_NODES = 1;
-    static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3;
+    public static final int DEFAULT_LOW_NUM_MASTER_NODES = 1;
+    public static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3;

    static final int DEFAULT_MIN_NUM_DATA_NODES = 1;
    static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3;
@@ -300,6 +300,7 @@ public final class InternalTestCluster extends TestCluster {
                builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString());
            }
        }
+        builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE);
        builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom"));
        builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir);
        builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos"));

View File

@@ -26,6 +26,7 @@ import org.elasticsearch.common.network.NetworkUtils;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.NodeConfigurationSource;
 import org.elasticsearch.transport.TransportSettings;
@@ -108,7 +109,7 @@ public class ClusterDiscoveryConfiguration extends NodeConfigurationSource {
    @Override
    public Settings nodeSettings(int nodeOrdinal) {
-        Settings.Builder builder = Settings.builder();
+        Settings.Builder builder = Settings.builder().put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numOfNodes);
        String[] unicastHosts = new String[unicastHostOrdinals.length];
        if (nodeOrdinal >= unicastHostPorts.length) {

View File

@@ -128,12 +128,16 @@ public class InternalTestClusterTests extends ESTestCase {
        boolean masterNodes = randomBoolean();
        int minNumDataNodes = randomIntBetween(0, 3);
        int maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
+        int numClientNodes = randomIntBetween(0, 2);
        final String clusterName1 = "shared1";
        final String clusterName2 = "shared2";
        NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
            @Override
            public Settings nodeSettings(int nodeOrdinal) {
                return Settings.builder()
+                    .put(
+                        NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
+                        2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes))
                    .put(NetworkModule.HTTP_ENABLED.getKey(), false)
                    .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
                    .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
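A quick worked example of the headroom computed above (the concrete values are just the upper bounds of the random choices): with masterNodes true, maxNumDataNodes at its maximum of 4, and numClientNodes at its maximum of 2, the setting becomes 2 * (3 + 4 + 2) = 18; presumably the factor of two leaves room for the two clusters (shared1 and shared2) that this test builds from the same configuration source.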
@@ -145,7 +149,7 @@ public class InternalTestClusterTests extends ESTestCase {
                    .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
            }
        };
-        int numClientNodes = randomIntBetween(0, 2);
        boolean enableHttpPipelining = randomBoolean();
        String nodePrefix = "foobar";

@@ -187,13 +191,17 @@ public class InternalTestClusterTests extends ESTestCase {
        long clusterSeed = randomLong();
        boolean masterNodes = randomBoolean();
        // we need one stable node
-        int minNumDataNodes = 2;
-        int maxNumDataNodes = 2;
+        final int minNumDataNodes = 2;
+        final int maxNumDataNodes = 2;
+        final int numClientNodes = randomIntBetween(0, 2);
        final String clusterName1 = "shared1";
        NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
            @Override
            public Settings nodeSettings(int nodeOrdinal) {
                return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false)
+                    .put(
+                        NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
+                        2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)
                    .put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
                    .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
                    .build();
@@ -203,7 +211,7 @@ public class InternalTestClusterTests extends ESTestCase {
                return Settings.builder()
                    .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
            }
        };
-        int numClientNodes = randomIntBetween(0, 2);
        boolean enableHttpPipelining = randomBoolean();
        String nodePrefix = "test";
        Path baseDir = createTempDir();
@@ -269,11 +277,13 @@ public class InternalTestClusterTests extends ESTestCase {
    public void testDifferentRolesMaintainPathOnRestart() throws Exception {
        final Path baseDir = createTempDir();
+        final int numNodes = 5;
        InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, true, 0, 0, "test",
            new NodeConfigurationSource() {
                @Override
                public Settings nodeSettings(int nodeOrdinal) {
                    return Settings.builder()
+                        .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes)
                        .put(NetworkModule.HTTP_ENABLED.getKey(), false)
                        .put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
                        .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
@@ -289,7 +299,7 @@ public class InternalTestClusterTests extends ESTestCase {
        cluster.beforeTest(random(), 0.0);
        try {
            Map<DiscoveryNode.Role, Set<String>> pathsPerRole = new HashMap<>();
-            for (int i = 0; i < 5; i++) {
+            for (int i = 0; i < numNodes; i++) {
                final DiscoveryNode.Role role = randomFrom(MASTER, DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST);
                final String node;
                switch (role) {