Enforce min master nodes in test cluster (#22065)
In order to start clusters with min master nodes set without setting `discovery.initial_state_timeout`, #21846 has changed the way we start nodes. Instead of the previous serial startup, we now always start the nodes in an async fashion (internally). This means that starting a cluster is unsafe without `min_master_nodes` being set. We should therefore make it mandatory.
This commit is contained in:
parent
1eddff822a
commit
bf65a69bbf
|
@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.exists;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.discovery.MasterNotDiscoveredException;
|
||||
import org.elasticsearch.discovery.zen.ElectMasterService;
|
||||
import org.elasticsearch.gateway.GatewayService;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
|
@ -36,7 +37,9 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThro
|
|||
public class IndicesExistsIT extends ESIntegTestCase {
|
||||
|
||||
public void testIndexExistsWithBlocksInPlace() throws IOException {
|
||||
Settings settings = Settings.builder().put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99).build();
|
||||
Settings settings = Settings.builder()
|
||||
.put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99)
|
||||
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1).build();
|
||||
String node = internalCluster().startNode(settings);
|
||||
|
||||
assertThrows(client(node).admin().indices().prepareExists("test").setMasterNodeTimeout(TimeValue.timeValueSeconds(0)),
|
||||
|
|
|
@ -268,7 +268,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
|
|||
public void testDynamicUpdateMinimumMasterNodes() throws Exception {
|
||||
Settings settings = Settings.builder()
|
||||
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
|
||||
.put("discovery.initial_state_timeout", "500ms")
|
||||
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), "1")
|
||||
.build();
|
||||
|
||||
logger.info("--> start first node and wait for it to be a master");
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlock;
|
|||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.discovery.zen.ElectMasterService;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
|
@ -49,20 +50,22 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
return blocks;
|
||||
}
|
||||
|
||||
public Client startNode(Settings.Builder settings) {
|
||||
String name = internalCluster().startNode(settings);
|
||||
public Client startNode(Settings.Builder settings, int minMasterNodes) {
|
||||
String name = internalCluster().startNode(
|
||||
Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes)
|
||||
.put(settings.build()));
|
||||
return internalCluster().client(name);
|
||||
}
|
||||
|
||||
public void testRecoverAfterNodes() throws Exception {
|
||||
logger.info("--> start node (1)");
|
||||
Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3));
|
||||
Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3), 1);
|
||||
assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start node (2)");
|
||||
Client clientNode2 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3));
|
||||
Client clientNode2 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3), 1);
|
||||
Thread.sleep(BLOCK_WAIT_TIMEOUT.millis());
|
||||
assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
|
@ -72,7 +75,7 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start node (3)");
|
||||
Client clientNode3 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3));
|
||||
Client clientNode3 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3), 1);
|
||||
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode1).isEmpty(), equalTo(true));
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode2).isEmpty(), equalTo(true));
|
||||
|
@ -81,13 +84,17 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
|
||||
public void testRecoverAfterMasterNodes() throws Exception {
|
||||
logger.info("--> start master_node (1)");
|
||||
Client master1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
|
||||
Client master1 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false)
|
||||
.put(Node.NODE_MASTER_SETTING.getKey(), true), 1);
|
||||
assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start data_node (1)");
|
||||
Client data1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
|
||||
Client data1 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_master_nodes", 2)
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false), 1);
|
||||
assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
@ -96,7 +103,9 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start data_node (2)");
|
||||
Client data2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
|
||||
Client data2 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), true)
|
||||
.put(Node.NODE_MASTER_SETTING.getKey(), false), 1);
|
||||
assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
@ -108,7 +117,10 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start master_node (2)");
|
||||
Client master2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
|
||||
Client master2 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_master_nodes", 2)
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), false)
|
||||
.put(Node.NODE_MASTER_SETTING.getKey(), true), 1);
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
|
||||
|
@ -117,13 +129,19 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
|
||||
public void testRecoverAfterDataNodes() throws Exception {
|
||||
logger.info("--> start master_node (1)");
|
||||
Client master1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
|
||||
Client master1 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_data_nodes", 2)
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), false)
|
||||
.put(Node.NODE_MASTER_SETTING.getKey(), true), 1);
|
||||
assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start data_node (1)");
|
||||
Client data1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
|
||||
Client data1 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_data_nodes", 2)
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), true)
|
||||
.put(Node.NODE_MASTER_SETTING.getKey(), false), 1);
|
||||
assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
@ -132,7 +150,10 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start master_node (2)");
|
||||
Client master2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
|
||||
Client master2 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_data_nodes", 2)
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), false)
|
||||
.put(Node.NODE_MASTER_SETTING.getKey(), true), 1);
|
||||
assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
|
||||
.getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
|
||||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
@ -144,7 +165,10 @@ public class RecoverAfterNodesIT extends ESIntegTestCase {
|
|||
hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
|
||||
|
||||
logger.info("--> start data_node (2)");
|
||||
Client data2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
|
||||
Client data2 = startNode(Settings.builder()
|
||||
.put("gateway.recover_after_data_nodes", 2)
|
||||
.put(Node.NODE_DATA_SETTING.getKey(), true)
|
||||
.put(Node.NODE_MASTER_SETTING.getKey(), false), 1);
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
|
||||
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
|
||||
|
|
|
@ -1065,7 +1065,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
|
|||
}
|
||||
|
||||
protected void ensureClusterSizeConsistency() {
|
||||
if (cluster() != null) { // if static init fails the cluster can be null
|
||||
if (cluster() != null && cluster().size() > 0) { // if static init fails the cluster can be null
|
||||
logger.trace("Check consistency for [{}] nodes", cluster().size());
|
||||
assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(cluster().size())).get());
|
||||
}
|
||||
|
@ -1075,7 +1075,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
|
|||
* Verifies that all nodes that have the same version of the cluster state as master have same cluster state
|
||||
*/
|
||||
protected void ensureClusterStateConsistency() throws IOException {
|
||||
if (cluster() != null) {
|
||||
if (cluster() != null && cluster().size() > 0) {
|
||||
ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState();
|
||||
byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState);
|
||||
// remove local node reference
|
||||
|
|
|
@ -130,6 +130,7 @@ import java.util.stream.Stream;
|
|||
|
||||
import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY;
|
||||
import static org.apache.lucene.util.LuceneTestCase.rarely;
|
||||
import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING;
|
||||
import static org.elasticsearch.test.ESTestCase.assertBusy;
|
||||
import static org.elasticsearch.test.ESTestCase.randomFrom;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
|
@ -589,12 +590,14 @@ public final class InternalTestCluster extends TestCluster {
|
|||
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed);
|
||||
|
||||
if (autoManageMinMasterNodes) {
|
||||
assert finalSettings.get(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null :
|
||||
assert finalSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null :
|
||||
"min master nodes may not be set when auto managed";
|
||||
finalSettings
|
||||
// don't wait too long not to slow down tests
|
||||
.put(ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.getKey(), "5s")
|
||||
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), defaultMinMasterNodes);
|
||||
.put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), defaultMinMasterNodes);
|
||||
} else if (finalSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null) {
|
||||
throw new IllegalArgumentException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " must be configured");
|
||||
}
|
||||
MockNode node = new MockNode(finalSettings.build(), plugins);
|
||||
return new NodeAndClient(name, node, nodeId);
|
||||
|
@ -883,8 +886,8 @@ public final class InternalTestCluster extends TestCluster {
|
|||
newSettings.put(callbackSettings);
|
||||
}
|
||||
if (minMasterNodes >= 0) {
|
||||
assert ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(newSettings.build()) == false : "min master nodes is auto managed";
|
||||
newSettings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes).build();
|
||||
assert DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(newSettings.build()) == false : "min master nodes is auto managed";
|
||||
newSettings.put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes).build();
|
||||
}
|
||||
if (clearDataIfNeeded) {
|
||||
clearDataIfNeeded(callback);
|
||||
|
@ -908,6 +911,10 @@ public final class InternalTestCluster extends TestCluster {
|
|||
private void createNewNode(final Settings newSettings) {
|
||||
final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id
|
||||
Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build();
|
||||
if (DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) {
|
||||
throw new IllegalStateException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
|
||||
" is not configured after restart of [" + name + "]");
|
||||
}
|
||||
Collection<Class<? extends Plugin>> plugins = node.getClasspathPlugins();
|
||||
node = new MockNode(finalSettings, plugins);
|
||||
markNodeDataDirsAsNotEligableForWipe(node);
|
||||
|
@ -1694,7 +1701,7 @@ public final class InternalTestCluster extends TestCluster {
|
|||
logger.debug("updating min_master_nodes to [{}]", minMasterNodes);
|
||||
try {
|
||||
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
|
||||
Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes)
|
||||
Settings.builder().put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minMasterNodes)
|
||||
));
|
||||
} catch (Exception e) {
|
||||
throw new ElasticsearchException("failed to update minimum master node to [{}] (current masters [{}])", e,
|
||||
|
|
|
@ -36,7 +36,6 @@ import org.elasticsearch.test.NodeConfigurationSource;
|
|||
import org.elasticsearch.test.discovery.TestZenDiscovery;
|
||||
import org.elasticsearch.transport.MockTcpTransportPlugin;
|
||||
import org.elasticsearch.transport.TransportSettings;
|
||||
import org.hamcrest.Matcher;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
|
@ -61,7 +60,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFile
|
|||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasEntry;
|
||||
import static org.hamcrest.Matchers.hasKey;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
/**
|
||||
|
@ -137,22 +135,15 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
|
||||
private void assertMMNinNodeSetting(String node, InternalTestCluster cluster, int masterNodes) {
|
||||
final int minMasterNodes = masterNodes / 2 + 1;
|
||||
final Matcher<Map<? extends String, ? extends String>> minMasterMatcher =
|
||||
hasEntry(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes));
|
||||
final Matcher<Map<? extends String, ?>> noMinMasterNodesMatcher = not(hasKey(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()));
|
||||
Settings nodeSettings = cluster.client(node).admin().cluster().prepareNodesInfo(node).get().getNodes().get(0).getSettings();
|
||||
assertThat("node setting of node [" + node + "] has the wrong min_master_node setting: ["
|
||||
+ nodeSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) + "]",
|
||||
nodeSettings.getAsMap(),
|
||||
cluster.getAutoManageMinMasterNode() ? minMasterMatcher: noMinMasterNodesMatcher);
|
||||
hasEntry(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)));
|
||||
}
|
||||
|
||||
private void assertMMNinClusterSetting(InternalTestCluster cluster, int masterNodes) {
|
||||
final int minMasterNodes = masterNodes / 2 + 1;
|
||||
Matcher<Map<? extends String, ? extends String>> minMasterMatcher =
|
||||
hasEntry(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes));
|
||||
Matcher<Map<? extends String, ?>> noMinMasterNodesMatcher = not(hasKey(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()));
|
||||
|
||||
for (final String node : cluster.getNodeNames()) {
|
||||
Settings stateSettings = cluster.client(node).admin().cluster().prepareState().setLocal(true)
|
||||
.get().getState().getMetaData().settings();
|
||||
|
@ -160,27 +151,44 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
assertThat("dynamic setting for node [" + node + "] has the wrong min_master_node setting : ["
|
||||
+ stateSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) + "]",
|
||||
stateSettings.getAsMap(),
|
||||
cluster.getAutoManageMinMasterNode() ? minMasterMatcher: noMinMasterNodesMatcher);
|
||||
hasEntry(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)));
|
||||
}
|
||||
}
|
||||
|
||||
public void testBeforeTest() throws Exception {
|
||||
final boolean autoManageMinMasterNodes = randomBoolean();
|
||||
long clusterSeed = randomLong();
|
||||
boolean masterNodes = randomBoolean();
|
||||
int minNumDataNodes = randomIntBetween(0, 3);
|
||||
int maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
|
||||
int numClientNodes = randomIntBetween(0, 2);
|
||||
final boolean masterNodes;
|
||||
final int minNumDataNodes;
|
||||
final int maxNumDataNodes;
|
||||
if (autoManageMinMasterNodes) {
|
||||
masterNodes = randomBoolean();
|
||||
minNumDataNodes = randomIntBetween(0, 3);
|
||||
maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
|
||||
} else {
|
||||
// if we manage min master nodes, we need to lock down the number of nodes
|
||||
minNumDataNodes = randomIntBetween(0, 4);
|
||||
maxNumDataNodes = minNumDataNodes;
|
||||
masterNodes = false;
|
||||
}
|
||||
final int numClientNodes = randomIntBetween(0, 2);
|
||||
final String clusterName1 = "shared1";
|
||||
final String clusterName2 = "shared2";
|
||||
NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
|
||||
@Override
|
||||
public Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.builder()
|
||||
final Settings.Builder settings = Settings.builder()
|
||||
.put(
|
||||
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
|
||||
2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes))
|
||||
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
|
||||
.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build();
|
||||
.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME);
|
||||
if (autoManageMinMasterNodes == false) {
|
||||
assert minNumDataNodes == maxNumDataNodes;
|
||||
assert masterNodes == false;
|
||||
settings.put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minNumDataNodes / 2 + 1);
|
||||
}
|
||||
return settings.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -195,7 +203,6 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
|
||||
Path baseDir = createTempDir();
|
||||
final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class);
|
||||
final boolean autoManageMinMasterNodes = randomBoolean();
|
||||
InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
|
||||
autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
|
||||
enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
|
||||
|
@ -258,9 +265,8 @@ public class InternalTestClusterTests extends ESTestCase {
|
|||
boolean enableHttpPipelining = randomBoolean();
|
||||
String nodePrefix = "test";
|
||||
Path baseDir = createTempDir();
|
||||
final boolean autoManageMinMasterNodes = randomBoolean();
|
||||
InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
|
||||
autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
|
||||
true, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
|
||||
enableHttpPipelining, nodePrefix, Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class),
|
||||
Function.identity());
|
||||
try {
|
||||
|
|
Loading…
Reference in New Issue