Replace internal usages of 'master' term in 'test' directory (#3283)

* Replace internal usages of 'master' terminology in server/src/internalClusterTest directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Replace 'master' terminology with 'cluster manager' in test directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Replace 'master' terminology with 'cluster manager' in server/src/internalClusterTest directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Adjust format by spotlessApply task

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Replace 'master' terminology with 'cluster manager' in test directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Adjust format by spotlessApply task

Signed-off-by: Tianli Feng <ftianli@amazon.com>
Tianli Feng 2022-05-11 16:34:57 -07:00 committed by GitHub
parent 677915dc00
commit 10bff0c9f5
86 changed files with 743 additions and 692 deletions
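Taken together, the changes below are almost entirely mechanical: test call sites move from the old 'master' names on the internal test cluster to the new 'cluster manager' names. A minimal sketch of the pattern, assuming the framework keeps the old entry point as a deprecated delegate during the transition (the delegate below is illustrative, not quoted from InternalTestCluster):

// Illustrative only: the real InternalTestCluster carries far more state.
public class RenamePatternSketch {
    /** New primary entry point using the inclusive term. */
    public String startClusterManagerOnlyNode() {
        return startNode("cluster_manager");
    }

    /** Old entry point kept as a deprecated delegate so unmigrated callers keep compiling. */
    @Deprecated
    public String startMasterOnlyNode() {
        return startClusterManagerOnlyNode();
    }

    private String startNode(String role) {
        return "node-with-role-" + role; // stub; the real method boots a test node
    }
}

Keeping a delegate like this lets renames such as the ones below land file by file, without one breaking flag day.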


@@ -86,7 +86,7 @@ public class IngestRestartIT extends OpenSearchIntegTestCase {
public void testFailureInConditionalProcessor() {
internalCluster().ensureAtLeastNumDataNodes(1);
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
final String pipelineId = "foo";
client().admin()
.cluster()


@@ -69,7 +69,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
}
public void testRollingRestartOfTwoNodeCluster() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(1);
+internalCluster().setBootstrapClusterManagerNodeIndex(1);
final List<String> nodes = internalCluster().startNodes(2);
createIndex(
"test",
@@ -135,7 +135,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
}
public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3);
RestClient restClient = getRestClient();
@@ -150,7 +150,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
}
public void testClearVotingTombstonesWaitingForRemoval() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3);
RestClient restClient = getRestClient();
@@ -165,7 +165,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
}
public void testFailsOnUnknownNode() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
internalCluster().startNodes(3);
ensureStableCluster(3);
RestClient restClient = getRestClient();
@@ -182,7 +182,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
}
public void testRemoveTwoNodesAtOnce() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3);
RestClient restClient = getRestClient();
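These Zen2RestApiIT tests drive the voting-config exclusions API through the low-level RestClient. A hedged sketch of the two request shapes involved; the path-parameter form follows the 7.x lineage OpenSearch forked from, and newer releases favor a ?node_names= query parameter instead, so treat the exact endpoints as assumptions:

import org.opensearch.client.Request;
import org.opensearch.client.RestClient;

import java.io.IOException;

final class VotingExclusionSketch {
    static void excludeThenClear(RestClient restClient, String nodeName) throws IOException {
        // Add an exclusion so the named node abdicates and cannot be re-elected.
        restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodeName));

        // Clear exclusions; wait_for_removal=false returns without waiting for excluded nodes to leave.
        Request clear = new Request("DELETE", "/_cluster/voting_config_exclusions");
        clear.addParameter("wait_for_removal", "false");
        restClient.performRequest(clear);
    }
}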


@@ -86,7 +86,7 @@ public class GceDiscoverTests extends OpenSearchIntegTestCase {
public void testJoin() {
// start master node
-final String masterNode = internalCluster().startMasterOnlyNode();
+final String masterNode = internalCluster().startClusterManagerOnlyNode();
registerGceNode(masterNode);
ClusterStateResponse clusterStateResponse = client(masterNode).admin()


@@ -91,7 +91,7 @@ public class DanglingIndicesRestIT extends HttpSmokeTestCase {
* Check that when dangling indices are discovered, then they can be listed via the REST API.
*/
public void testDanglingIndicesCanBeListed() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(1);
+internalCluster().setBootstrapClusterManagerNodeIndex(1);
internalCluster().startNodes(3, buildSettings(0));
final DanglingIndexDetails danglingIndexDetails = createDanglingIndices(INDEX_NAME);
@@ -121,7 +121,7 @@ public class DanglingIndicesRestIT extends HttpSmokeTestCase {
* Check that dangling indices can be imported.
*/
public void testDanglingIndicesCanBeImported() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(1);
+internalCluster().setBootstrapClusterManagerNodeIndex(1);
internalCluster().startNodes(3, buildSettings(0));
createDanglingIndices(INDEX_NAME);
@@ -157,7 +157,7 @@ public class DanglingIndicesRestIT extends HttpSmokeTestCase {
* deleted through the API
*/
public void testDanglingIndicesCanBeDeleted() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(1);
+internalCluster().setBootstrapClusterManagerNodeIndex(1);
internalCluster().startNodes(3, buildSettings(1));
createDanglingIndices(INDEX_NAME, OTHER_INDEX_NAME);
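For orientation, the dangling-indices surface these three tests exercise maps onto three REST calls. A sketch against the same low-level client; the paths follow the dangling-indices API as documented upstream, so treat them as assumptions rather than lines from this test:

import org.opensearch.client.Request;
import org.opensearch.client.RestClient;

import java.io.IOException;

final class DanglingIndicesSketch {
    static void listImportDelete(RestClient client, String indexUUID) throws IOException {
        client.performRequest(new Request("GET", "/_dangling")); // list dangling indices

        Request importRequest = new Request("POST", "/_dangling/" + indexUUID);
        importRequest.addParameter("accept_data_loss", "true"); // import one back into the cluster
        client.performRequest(importRequest);

        Request deleteRequest = new Request("DELETE", "/_dangling/" + indexUUID);
        deleteRequest.addParameter("accept_data_loss", "true"); // or delete it for good
        client.performRequest(deleteRequest);
    }
}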


@@ -47,7 +47,7 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
}
public void testNodesInfoTimeout() {
-String masterNode = internalCluster().startMasterOnlyNode();
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode();
@@ -65,11 +65,11 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
nodes.add(node.getNode().getName());
}
assertThat(response.getNodes().size(), equalTo(2));
-assertThat(nodes.contains(masterNode), is(true));
+assertThat(nodes.contains(clusterManagerNode), is(true));
}
public void testNodesStatsTimeout() {
-String masterNode = internalCluster().startMasterOnlyNode();
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode();
TimeValue timeout = TimeValue.timeValueMillis(1000);
@@ -87,11 +87,11 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
nodes.add(node.getNode().getName());
}
assertThat(response.getNodes().size(), equalTo(2));
-assertThat(nodes.contains(masterNode), is(true));
+assertThat(nodes.contains(clusterManagerNode), is(true));
}
public void testListTasksTimeout() {
-String masterNode = internalCluster().startMasterOnlyNode();
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode();
TimeValue timeout = TimeValue.timeValueMillis(1000);
@@ -108,7 +108,7 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
}
public void testRecoveriesWithTimeout() {
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode();
@@ -148,7 +148,7 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
}
public void testStatsWithTimeout() {
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode();
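Each of these tests follows the same shape: one cluster-manager node, two data nodes, and a client-side timeout that should drop unresponsive nodes from the response rather than fail the request. A hedged sketch of the call under test; setTimeout here is assumed from the nodes-request builder hierarchy rather than copied from the test:

import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.opensearch.client.Client;
import org.opensearch.common.unit.TimeValue;

final class NodesInfoTimeoutSketch {
    static NodesInfoResponse nodesInfoWithTimeout(Client client) {
        // Nodes that do not answer within the timeout are simply absent from getNodes().
        return client.admin().cluster().prepareNodesInfo().setTimeout(TimeValue.timeValueMillis(1000)).get();
    }
}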


@@ -1110,7 +1110,7 @@ public final class ClusterAllocationExplainIT extends OpenSearchIntegTestCase {
public void testCannotAllocateStaleReplicaExplanation() throws Exception {
logger.info("--> starting 3 nodes");
-final String masterNode = internalCluster().startNode();
+final String clusterManagerNode = internalCluster().startNode();
// start replica node first, so it's path will be used first when we start a node after
// stopping all of them at end of test.
final String replicaNode = internalCluster().startNode();
@@ -1123,7 +1123,7 @@ public final class ClusterAllocationExplainIT extends OpenSearchIntegTestCase {
1,
Settings.builder()
.put("index.routing.allocation.include._name", primaryNode)
-.put("index.routing.allocation.exclude._name", masterNode)
+.put("index.routing.allocation.exclude._name", clusterManagerNode)
.build(),
ActiveShardCount.ONE
);
@@ -1165,7 +1165,7 @@ public final class ClusterAllocationExplainIT extends OpenSearchIntegTestCase {
logger.info("--> restart the node with the stale replica");
String restartedNode = internalCluster().startDataOnlyNode(replicaDataPathSettings);
-ensureClusterSizeConsistency(); // wait for the master to finish processing join.
+ensureClusterSizeConsistency(); // wait for the cluster-manager to finish processing join.
// wait until the system has fetched shard data and we know there is no valid shard copy
assertBusy(() -> {


@@ -117,7 +117,7 @@ import static org.hamcrest.Matchers.startsWith;
/**
* Integration tests for task management API
* <p>
-* We need at least 2 nodes so we have a master node a non-master node
+* We need at least 2 nodes so we have a cluster-manager node a non-cluster-manager node
*/
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2)
public class TasksIT extends OpenSearchIntegTestCase {
@@ -154,17 +154,17 @@ public class TasksIT extends OpenSearchIntegTestCase {
assertThat(response.getTasks().size(), greaterThanOrEqualTo(cluster().numDataNodes()));
}
-public void testMasterNodeOperationTasks() {
+public void testClusterManagerNodeOperationTasks() {
registerTaskManagerListeners(ClusterHealthAction.NAME);
-// First run the health on the master node - should produce only one task on the master node
+// First run the health on the cluster-manager node - should produce only one task on the cluster-manager node
internalCluster().masterClient().admin().cluster().prepareHealth().get();
assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events
assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events
resetTaskManagerListeners(ClusterHealthAction.NAME);
-// Now run the health on a non-master node - should produce one task on master and one task on another node
+// Now run the health on a non-cluster-manager node - should produce one task on cluster-manager and one task on another node
internalCluster().nonMasterClient().admin().cluster().prepareHealth().get();
assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events
assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events
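The renamed test encodes a simple invariant: a cluster-health request registers one task when sent to the cluster-manager node and two when sent to any other node (the local coordinating task plus the one forwarded to the cluster-manager). A hedged sketch of inspecting those tasks directly; the action name string is ClusterHealthAction.NAME, everything else is illustrative:

import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.opensearch.client.Client;

final class HealthTaskSketch {
    static ListTasksResponse healthTasks(Client client) {
        // Expect one matching task if this client talks to the cluster-manager, two otherwise.
        return client.admin().cluster().prepareListTasks().setActions("cluster:monitor/health").get();
    }
}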


@@ -68,8 +68,8 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
return Collections.singletonList(MockTransportService.TestPlugin.class);
}
-public void testNonLocalRequestAlwaysFindsMaster() throws Exception {
-runRepeatedlyWhileChangingMaster(() -> {
+public void testNonLocalRequestAlwaysFindsClusterManager() throws Exception {
+runRepeatedlyWhileChangingClusterManager(() -> {
final ClusterStateRequestBuilder clusterStateRequestBuilder = client().admin()
.cluster()
.prepareState()
@@ -82,12 +82,12 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
} catch (MasterNotDiscoveredException e) {
return; // ok, we hit the disconnected node
}
-assertNotNull("should always contain a master node", clusterStateResponse.getState().nodes().getMasterNodeId());
+assertNotNull("should always contain a cluster-manager node", clusterStateResponse.getState().nodes().getMasterNodeId());
});
}
public void testLocalRequestAlwaysSucceeds() throws Exception {
-runRepeatedlyWhileChangingMaster(() -> {
+runRepeatedlyWhileChangingClusterManager(() -> {
final String node = randomFrom(internalCluster().getNodeNames());
final DiscoveryNodes discoveryNodes = client(node).admin()
.cluster()
@@ -108,8 +108,8 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
});
}
-public void testNonLocalRequestAlwaysFindsMasterAndWaitsForMetadata() throws Exception {
-runRepeatedlyWhileChangingMaster(() -> {
+public void testNonLocalRequestAlwaysFindsClusterManagerAndWaitsForMetadata() throws Exception {
+runRepeatedlyWhileChangingClusterManager(() -> {
final String node = randomFrom(internalCluster().getNodeNames());
final long metadataVersion = internalCluster().getInstance(ClusterService.class, node)
.getClusterApplierService()
@@ -134,14 +134,14 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
}
if (clusterStateResponse.isWaitForTimedOut() == false) {
final ClusterState state = clusterStateResponse.getState();
-assertNotNull("should always contain a master node", state.nodes().getMasterNodeId());
+assertNotNull("should always contain a cluster-manager node", state.nodes().getMasterNodeId());
assertThat("waited for metadata version", state.metadata().version(), greaterThanOrEqualTo(waitForMetadataVersion));
}
});
}
public void testLocalRequestWaitsForMetadata() throws Exception {
-runRepeatedlyWhileChangingMaster(() -> {
+runRepeatedlyWhileChangingClusterManager(() -> {
final String node = randomFrom(internalCluster().getNodeNames());
final long metadataVersion = internalCluster().getInstance(ClusterService.class, node)
.getClusterApplierService()
@@ -170,7 +170,7 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
});
}
-public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception {
+public void runRepeatedlyWhileChangingClusterManager(Runnable runnable) throws Exception {
internalCluster().startNodes(3);
assertBusy(
@@ -191,7 +191,7 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
)
);
-final String masterName = internalCluster().getMasterName();
+final String clusterManagerName = internalCluster().getMasterName();
final AtomicBoolean shutdown = new AtomicBoolean();
final Thread assertingThread = new Thread(() -> {
@@ -204,9 +204,12 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
String value = "none";
while (shutdown.get() == false) {
value = "none".equals(value) ? "all" : "none";
-final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames()));
+final String nonClusterManagerNode = randomValueOtherThan(
+clusterManagerName,
+() -> randomFrom(internalCluster().getNodeNames())
+);
assertAcked(
-client(nonMasterNode).admin()
+client(nonClusterManagerNode).admin()
.cluster()
.prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), value))
@@ -222,22 +225,25 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
assertingThread.start();
updatingThread.start();
-final MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(
+final MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class,
-masterName
+clusterManagerName
);
for (MockTransportService mockTransportService : mockTransportServices) {
-if (masterTransportService != mockTransportService) {
-masterTransportService.addFailToSendNoConnectRule(mockTransportService);
-mockTransportService.addFailToSendNoConnectRule(masterTransportService);
+if (clusterManagerTransportService != mockTransportService) {
+clusterManagerTransportService.addFailToSendNoConnectRule(mockTransportService);
+mockTransportService.addFailToSendNoConnectRule(clusterManagerTransportService);
}
}
assertBusy(() -> {
-final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames()));
-final String claimedMasterName = internalCluster().getMasterName(nonMasterNode);
-assertThat(claimedMasterName, not(equalTo(masterName)));
+final String nonClusterManagerNode = randomValueOtherThan(
+clusterManagerName,
+() -> randomFrom(internalCluster().getNodeNames())
+);
+final String claimedClusterManagerName = internalCluster().getMasterName(nonClusterManagerNode);
+assertThat(claimedClusterManagerName, not(equalTo(clusterManagerName)));
});
shutdown.set(true);
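The wait-for-metadata variants above poll the cluster state with a metadata-version floor and a bounded wait, then check isWaitForTimedOut() on the response. A hedged sketch of that call shape; the builder method names are assumed from the cluster-state request API rather than quoted from this test:

import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
import org.opensearch.client.Client;
import org.opensearch.common.unit.TimeValue;

final class WaitForMetadataSketch {
    static ClusterStateResponse waitForMetadata(Client client, long minMetadataVersion) {
        // Returns early once metadata reaches the floor, or flags isWaitForTimedOut() after 30s.
        return client.admin()
            .cluster()
            .prepareState()
            .setWaitForMetadataVersion(minMetadataVersion)
            .setWaitForTimeOut(TimeValue.timeValueSeconds(30))
            .get();
    }
}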


@@ -97,7 +97,7 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
for (int i = 0; i < numNodes; i++) {
boolean isDataNode = randomBoolean();
boolean isIngestNode = randomBoolean();
-boolean isMasterNode = randomBoolean();
+boolean isClusterManagerNode = randomBoolean();
boolean isRemoteClusterClientNode = false;
final Set<DiscoveryNodeRole> roles = new HashSet<>();
if (isDataNode) {
@@ -106,7 +106,7 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
if (isIngestNode) {
roles.add(DiscoveryNodeRole.INGEST_ROLE);
}
-if (isMasterNode) {
+if (isClusterManagerNode) {
roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
}
if (isRemoteClusterClientNode) {
@@ -128,14 +128,14 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
if (isIngestNode) {
incrementCountForRole(DiscoveryNodeRole.INGEST_ROLE.roleName(), expectedCounts);
}
-if (isMasterNode) {
+if (isClusterManagerNode) {
incrementCountForRole(DiscoveryNodeRole.MASTER_ROLE.roleName(), expectedCounts);
incrementCountForRole(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), expectedCounts);
}
if (isRemoteClusterClientNode) {
incrementCountForRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), expectedCounts);
}
-if (!isDataNode && !isMasterNode && !isIngestNode && !isRemoteClusterClientNode) {
+if (!isDataNode && !isClusterManagerNode && !isIngestNode && !isRemoteClusterClientNode) {
incrementCountForRole(ClusterStatsNodes.Counts.COORDINATING_ONLY, expectedCounts);
}
@@ -254,12 +254,12 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
}
public void testClusterStatusWhenStateNotRecovered() throws Exception {
-internalCluster().startMasterOnlyNode(Settings.builder().put("gateway.recover_after_nodes", 2).build());
+internalCluster().startClusterManagerOnlyNode(Settings.builder().put("gateway.recover_after_nodes", 2).build());
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED));
if (randomBoolean()) {
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
} else {
internalCluster().startDataOnlyNode();
}


@@ -48,7 +48,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBui
public class IndicesExistsIT extends OpenSearchIntegTestCase {
public void testIndexExistsWithBlocksInPlace() throws IOException {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
Settings settings = Settings.builder().put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99).build();
String node = internalCluster().startNode(settings);


@@ -65,7 +65,7 @@ public class IndexingMasterFailoverIT extends OpenSearchIntegTestCase {
public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable {
logger.info("--> start 4 nodes, 3 master, 1 data");
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
internalCluster().startMasterOnlyNodes(3, Settings.EMPTY);


@@ -93,9 +93,13 @@ import static org.hamcrest.Matchers.is;
public class ClusterStateDiffIT extends OpenSearchIntegTestCase {
public void testClusterStateDiffSerialization() throws Exception {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
-DiscoveryNode masterNode = randomNode("master");
+DiscoveryNode clusterManagerNode = randomNode("master");
DiscoveryNode otherNode = randomNode("other");
-DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build();
+DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
+.add(clusterManagerNode)
+.add(otherNode)
+.localNodeId(clusterManagerNode.getId())
+.build();
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(
ClusterState.Builder.toBytes(clusterState),


@@ -84,7 +84,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
}
public void testTwoNodesNoMasterBlock() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(1);
+internalCluster().setBootstrapClusterManagerNodeIndex(1);
Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -250,7 +250,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
}
public void testThreeNodesNoMasterBlock() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -309,11 +309,11 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
}
-List<String> nonMasterNodes = new ArrayList<>(
+List<String> nonClusterManagerNodes = new ArrayList<>(
Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), Collections.singleton(internalCluster().getMasterName()))
);
-Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonMasterNodes.get(0));
-Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonMasterNodes.get(1));
+Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0));
+Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1));
internalCluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode();
@@ -340,7 +340,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
}
public void testCannotCommitStateThreeNodes() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -393,7 +393,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
// otherwise persistent setting (which is a part of accepted state on old master) will be propagated to other nodes
logger.debug("--> wait for master to be elected in major partition");
assertBusy(() -> {
-DiscoveryNode masterNode = internalCluster().client(randomFrom(otherNodes))
+DiscoveryNode clusterManagerNode = internalCluster().client(randomFrom(otherNodes))
.admin()
.cluster()
.prepareState()
@@ -402,8 +402,8 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
.getState()
.nodes()
.getMasterNode();
-assertThat(masterNode, notNullValue());
-assertThat(masterNode.getName(), not(equalTo(master)));
+assertThat(clusterManagerNode, notNullValue());
+assertThat(clusterManagerNode.getName(), not(equalTo(master)));
});
partition.stopDisrupting();


@@ -56,7 +56,7 @@ import static org.hamcrest.Matchers.nullValue;
public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
public void testSimpleOnlyMasterNodeElection() throws IOException {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start data node / non master node");
internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s"));
try {
@@ -77,7 +77,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
// all is well, no cluster-manager elected
}
logger.info("--> start master node");
-final String masterNodeName = internalCluster().startMasterOnlyNode();
+final String masterNodeName = internalCluster().startClusterManagerOnlyNode();
assertThat(
internalCluster().nonMasterClient()
.admin()
@@ -160,7 +160,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
}
public void testElectOnlyBetweenMasterNodes() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start data node / non master node");
internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s"));
try {
@@ -181,7 +181,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
// all is well, no cluster-manager elected
}
logger.info("--> start master node (1)");
-final String masterNodeName = internalCluster().startMasterOnlyNode();
+final String masterNodeName = internalCluster().startClusterManagerOnlyNode();
assertThat(
internalCluster().nonMasterClient()
.admin()
@@ -210,7 +210,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
);
logger.info("--> start master node (2)");
-final String nextMasterEligableNodeName = internalCluster().startMasterOnlyNode();
+final String nextMasterEligableNodeName = internalCluster().startClusterManagerOnlyNode();
assertThat(
internalCluster().nonMasterClient()
.admin()
@@ -312,9 +312,9 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
}
public void testAliasFilterValidation() {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start master node / non data");
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
logger.info("--> start data node / non master node");
internalCluster().startDataOnlyNode();


@@ -173,7 +173,7 @@ public class RareClusterStateIT extends OpenSearchIntegTestCase {
}
public void testDeleteCreateInOneBulk() throws Exception {
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)).get();


@@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.containsString;
public class RemoveCustomsCommandIT extends OpenSearchIntegTestCase {
public void testRemoveCustomsAbortedByUser() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
@@ -59,7 +59,7 @@ public class RemoveCustomsCommandIT extends OpenSearchIntegTestCase {
}
public void testRemoveCustomsSuccessful() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
createIndex("test");
client().admin().indices().prepareDelete("test").get();
@@ -85,7 +85,7 @@ public class RemoveCustomsCommandIT extends OpenSearchIntegTestCase {
}
public void testCustomDoesNotMatch() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
createIndex("test");
client().admin().indices().prepareDelete("test").get();


@@ -49,7 +49,7 @@ import static org.hamcrest.Matchers.not;
public class RemoveSettingsCommandIT extends OpenSearchIntegTestCase {
public void testRemoveSettingsAbortedByUser() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
client().admin()
.cluster()
@@ -78,7 +78,7 @@ public class RemoveSettingsCommandIT extends OpenSearchIntegTestCase {
}
public void testRemoveSettingsSuccessful() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
client().admin()
.cluster()
@@ -122,7 +122,7 @@ public class RemoveSettingsCommandIT extends OpenSearchIntegTestCase {
}
public void testSettingDoesNotMatch() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
client().admin()
.cluster()


@@ -228,7 +228,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void testBootstrapNoClusterState() throws IOException {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
@@ -243,7 +243,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void testDetachNoClusterState() throws IOException {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
@@ -258,7 +258,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void testBootstrapAbortedByUser() throws IOException {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
@@ -271,7 +271,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void testDetachAbortedByUser() throws IOException {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
@@ -284,12 +284,12 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void test3MasterNodes2Failed() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(2);
+internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> masterNodes = new ArrayList<>();
logger.info("--> start 1st cluster-manager-eligible node");
masterNodes.add(
-internalCluster().startMasterOnlyNode(
+internalCluster().startClusterManagerOnlyNode(
Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()
)
); // node ordinal 0
@@ -364,7 +364,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
);
logger.info("--> start 1st cluster-manager-eligible node");
-String masterNode2 = internalCluster().startMasterOnlyNode(master1DataPathSettings);
+String masterNode2 = internalCluster().startClusterManagerOnlyNode(master1DataPathSettings);
logger.info("--> detach-cluster on data-only node");
Environment environmentData = TestEnvironment.newEnvironment(
@@ -410,15 +410,15 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
detachCluster(environmentMaster3, false);
logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and ensure 4 nodes stable cluster");
-bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master2DataPathSettings));
-bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master3DataPathSettings));
+bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(master2DataPathSettings));
+bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(master3DataPathSettings));
ensureStableCluster(4);
bootstrappedNodes.forEach(node -> ensureReadOnlyBlock(true, node));
removeBlock();
}
public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
Settings settings = Settings.builder().put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), true).build();
@@ -478,21 +478,21 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void testNoInitialBootstrapAfterDetach() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
-String masterNode = internalCluster().startMasterOnlyNode();
-Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
+Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment(
-Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build()
+Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build()
);
detachCluster(environment);
-String node = internalCluster().startMasterOnlyNode(
+String node = internalCluster().startClusterManagerOnlyNode(
Settings.builder()
// give the cluster 2 seconds to elect the master (it should not)
.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s")
-.put(masterNodeDataPathSettings)
+.put(clusterManagerNodeDataPathSettings)
.build()
);
@@ -503,9 +503,9 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
-String masterNode = internalCluster().startMasterOnlyNode();
-Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
+Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings(
Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")
);
@@ -514,17 +514,17 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
-ensureReadOnlyBlock(false, masterNode);
+ensureReadOnlyBlock(false, clusterManagerNode);
internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment(
-Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build()
+Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build()
);
detachCluster(environment);
unsafeBootstrap(environment); // read-only block will remain same as one before bootstrap, in this case it is false
-String masterNode2 = internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
+String masterNode2 = internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings);
ensureGreen();
ensureReadOnlyBlock(false, masterNode2);
@@ -533,9 +533,9 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
}
public void testUnsafeBootstrapWithApplyClusterReadOnlyBlockAsFalse() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
-String masterNode = internalCluster().startMasterOnlyNode();
-Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
+Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings(
Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")
);
@@ -544,18 +544,18 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
-ensureReadOnlyBlock(false, masterNode);
+ensureReadOnlyBlock(false, clusterManagerNode);
internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment(
-Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build()
+Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build()
);
unsafeBootstrap(environment, false, false);
-String masterNode2 = internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
+String clusterManagerNode2 = internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings);
ensureGreen();
-ensureReadOnlyBlock(false, masterNode2);
+ensureReadOnlyBlock(false, clusterManagerNode2);
state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
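The detachCluster and unsafeBootstrap helpers used throughout this file operate on a stopped node's data path, mirroring what the node CLI does offline; by lineage the equivalent commands should be bin/opensearch-node detach-cluster and bin/opensearch-node unsafe-bootstrap, though those invocations are an assumption here. Pieced together from the calls shown above, the test-side sequence is:

// Fragment in the test's own idiom; names follow the diff above, pathSettings is shorthand.
Settings pathSettings = internalCluster().dataPathSettings(clusterManagerNode);
internalCluster().stopCurrentMasterNode(); // the tool must run against a stopped node
final Environment environment = TestEnvironment.newEnvironment(
    Settings.builder().put(internalCluster().getDefaultSettings()).put(pathSettings).build()
);
detachCluster(environment);    // move the on-disk state out of its old cluster
unsafeBootstrap(environment);  // elect that state as the seed of a new cluster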


@@ -62,7 +62,7 @@ public class VotingConfigurationIT extends OpenSearchIntegTestCase {
}
public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionException, InterruptedException {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
internalCluster().startNodes(2);
final String originalMaster = internalCluster().getMasterName();
@@ -73,7 +73,7 @@ public class VotingConfigurationIT extends OpenSearchIntegTestCase {
}
public void testElectsNodeNotInVotingConfiguration() throws Exception {
-internalCluster().setBootstrapMasterNodeIndex(0);
+internalCluster().setBootstrapClusterManagerNodeIndex(0);
final List<String> nodeNames = internalCluster().startNodes(4);
// a 4-node cluster settles on a 3-node configuration; we then prevent the nodes in the configuration from winning an election
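The comment above is the heart of the second test: with four cluster-manager-eligible nodes, only three sit in the committed voting configuration, and the test then forces a node outside that set to win an election. A small self-contained sketch of the selection step (names here are illustrative):

import java.util.List;
import java.util.Set;

final class NonVotingNodePick {
    // Pick the first started node whose name is absent from the voting configuration.
    static String firstNodeOutsideConfig(List<String> nodeNames, Set<String> votingConfigNodeNames) {
        return nodeNames.stream().filter(name -> !votingConfigNodeNames.contains(name)).findFirst().orElseThrow();
    }
}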


@@ -60,7 +60,7 @@ import java.util.concurrent.TimeoutException;
import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.DISCOVERY;
import static org.opensearch.test.NodeRoles.dataNode;
-import static org.opensearch.test.NodeRoles.masterOnlyNode;
+import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -73,7 +73,7 @@ public class ZenDiscoveryIT extends OpenSearchIntegTestCase {
public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception {
-Settings masterNodeSettings = masterOnlyNode();
+Settings masterNodeSettings = clusterManagerOnlyNode();
internalCluster().startNodes(2, masterNodeSettings);
Settings dateNodeSettings = dataNode();
internalCluster().startNodes(2, dateNodeSettings);
@@ -106,10 +106,10 @@ public class ZenDiscoveryIT extends OpenSearchIntegTestCase {
}
public void testHandleNodeJoin_incompatibleClusterState() throws InterruptedException, ExecutionException, TimeoutException {
-String masterNode = internalCluster().startMasterOnlyNode();
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String node1 = internalCluster().startNode();
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1);
-Coordinator coordinator = (Coordinator) internalCluster().getInstance(Discovery.class, masterNode);
+Coordinator coordinator = (Coordinator) internalCluster().getInstance(Discovery.class, clusterManagerNode);
final ClusterState state = clusterService.state();
Metadata.Builder mdBuilder = Metadata.builder(state.metadata());
mdBuilder.putCustom(CustomMetadata.TYPE, new CustomMetadata("data"));


@@ -100,7 +100,7 @@ public class AllocationIdIT extends OpenSearchIntegTestCase {
// initial set up
final String indexName = "index42";
-final String master = internalCluster().startMasterOnlyNode();
+final String clusterManager = internalCluster().startClusterManagerOnlyNode();
String node1 = internalCluster().startNode();
createIndex(
indexName,


@@ -110,7 +110,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
}
public void testBulkWeirdScenario() throws Exception {
-String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
+String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2);
assertAcked(
@@ -203,7 +203,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception {
logger.info("--> starting 3 nodes, 1 master, 2 data");
-String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
+String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2);
assertAcked(
client().admin()
@@ -292,7 +292,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
public void testForceStaleReplicaToBePromotedToPrimary() throws Exception {
logger.info("--> starting 3 nodes, 1 master, 2 data");
-String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
+String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2);
assertAcked(
client().admin()
@@ -305,7 +305,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
Set<String> historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards())
.map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY))
.collect(Collectors.toSet());
-createStaleReplicaScenario(master);
+createStaleReplicaScenario(clusterManager);
if (randomBoolean()) {
assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(0));
}
@@ -343,7 +343,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
}
logger.info("expected allocation ids: {} actual allocation ids: {}", expectedAllocationIds, allocationIds);
};
-final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
+final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, clusterManager);
clusterService.addListener(clusterStateListener);
rerouteBuilder.get();
@@ -390,7 +390,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
}
public void testForceStaleReplicaToBePromotedToPrimaryOnWrongNode() throws Exception {
-String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
+String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2);
final String idxName = "test";
assertAcked(
@@ -401,14 +401,14 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
.get()
);
ensureGreen();
-createStaleReplicaScenario(master);
+createStaleReplicaScenario(clusterManager);
// Ensure the stopped primary's data is deleted so that it doesn't get picked up by the next datanode we start
internalCluster().wipePendingDataDirectories();
internalCluster().startDataOnlyNodes(1);
-ensureStableCluster(3, master);
+ensureStableCluster(3, clusterManager);
final int shardId = 0;
final List<String> nodeNames = new ArrayList<>(Arrays.asList(internalCluster().getNodeNames()));
-nodeNames.remove(master);
+nodeNames.remove(clusterManager);
client().admin()
.indices()
.prepareShardStores(idxName)
@@ -434,7 +434,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
}
public void testForceStaleReplicaToBePromotedForGreenIndex() {
-internalCluster().startMasterOnlyNode(Settings.EMPTY);
+internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final String idxName = "test";
assertAcked(
@@ -459,7 +459,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
}
public void testForceStaleReplicaToBePromotedForMissingIndex() {
-internalCluster().startMasterOnlyNode(Settings.EMPTY);
+internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final String dataNode = internalCluster().startDataOnlyNode();
final String idxName = "test";
IndexNotFoundException ex = expectThrows(
@@ -497,7 +497,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
}
public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception {
-internalCluster().startMasterOnlyNode(Settings.EMPTY);
+internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNode(Settings.EMPTY);
assertAcked(
client().admin()
@@ -537,7 +537,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
}
public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception {
-internalCluster().startMasterOnlyNode(Settings.EMPTY);
+internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNode(Settings.EMPTY);
assertAcked(
client().admin()
@@ -657,7 +657,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
* This test asserts that replicas failed to execute resync operations will be failed but not marked as stale.
*/
public void testPrimaryReplicaResyncFailed() throws Exception {
-String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
+String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final int numberOfReplicas = between(2, 3);
final String oldPrimary = internalCluster().startDataOnlyNode();
assertAcked(


@@ -154,7 +154,7 @@ public class DiskThresholdDeciderIT extends OpenSearchIntegTestCase {
}
public void testHighWatermarkNotExceeded() throws Exception {
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String dataNodeName = internalCluster().startDataOnlyNode();
ensureStableCluster(3);
@@ -189,7 +189,7 @@ public class DiskThresholdDeciderIT extends OpenSearchIntegTestCase {
}
public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception {
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String dataNodeName = internalCluster().startDataOnlyNode();
ensureStableCluster(3);
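Both tests steer shard placement by pinning the disk watermarks. The setting keys below are the stock cluster settings; the percentage values are illustrative defaults, not the values this test installs:

import org.opensearch.common.settings.Settings;

final class WatermarkSettingsSketch {
    static Settings watermarks() {
        return Settings.builder()
            .put("cluster.routing.allocation.disk.watermark.low", "85%")          // stop allocating new shards
            .put("cluster.routing.allocation.disk.watermark.high", "90%")         // start relocating shards away
            .put("cluster.routing.allocation.disk.watermark.flood_stage", "95%")  // index blocks kick in
            .build();
    }
}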


@@ -63,7 +63,7 @@ public class ClusterDisruptionCleanSettingsIT extends OpenSearchIntegTestCase {
public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception {
// Don't use AbstractDisruptionTestCase.DEFAULT_SETTINGS as settings
// (which can cause node disconnects on a slow CI machine)
-internalCluster().startMasterOnlyNode();
+internalCluster().startClusterManagerOnlyNode();
final String node_1 = internalCluster().startDataOnlyNode();
logger.info("--> creating index [test] with one shard and on replica");


@@ -338,26 +338,26 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
// simulate handling of sending shard failure during an isolation
public void testSendingShardFailure() throws Exception {
List<String> nodes = startCluster(3);
-String masterNode = internalCluster().getMasterName();
-List<String> nonMasterNodes = nodes.stream().filter(node -> !node.equals(masterNode)).collect(Collectors.toList());
-String nonMasterNode = randomFrom(nonMasterNodes);
+String clusterManagerNode = internalCluster().getMasterName();
+List<String> nonClusterManagerNodes = nodes.stream().filter(node -> !node.equals(clusterManagerNode)).collect(Collectors.toList());
+String nonClusterManagerNode = randomFrom(nonClusterManagerNodes);
assertAcked(
prepareCreate("test").setSettings(
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)
)
);
ensureGreen();
-String nonMasterNodeId = internalCluster().clusterService(nonMasterNode).localNode().getId();
+String nonClusterManagerNodeId = internalCluster().clusterService(nonClusterManagerNode).localNode().getId();
// fail a random shard
ShardRouting failedShard = randomFrom(
-clusterService().state().getRoutingNodes().node(nonMasterNodeId).shardsWithState(ShardRoutingState.STARTED)
+clusterService().state().getRoutingNodes().node(nonClusterManagerNodeId).shardsWithState(ShardRoutingState.STARTED)
);
-ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonMasterNode);
+ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonClusterManagerNode);
CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean success = new AtomicBoolean();
-String isolatedNode = randomBoolean() ? masterNode : nonMasterNode;
+String isolatedNode = randomBoolean() ? clusterManagerNode : nonClusterManagerNode;
TwoPartitions partitions = isolateNode(isolatedNode);
// we cannot use the NetworkUnresponsive disruption type here as it will swallow the "shard failed" request, calling neither
// onSuccess nor onFailure on the provided listener.
@@ -385,10 +385,10 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
}
);
-if (isolatedNode.equals(nonMasterNode)) {
-assertNoMaster(nonMasterNode);
+if (isolatedNode.equals(nonClusterManagerNode)) {
+assertNoMaster(nonClusterManagerNode);
} else {
-ensureStableCluster(2, nonMasterNode);
+ensureStableCluster(2, nonClusterManagerNode);
}
// heal the partition
@@ -410,10 +410,10 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
}
public void testCannotJoinIfMasterLostDataFolder() throws Exception {
-String masterNode = internalCluster().startMasterOnlyNode();
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
-internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() {
+internalCluster().restartNode(clusterManagerNode, new InternalTestCluster.RestartCallback() {
@Override
public boolean clearData(String nodeName) {
return true;
@@ -442,9 +442,9 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
});
assertBusy(() -> {
-assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut());
+assertFalse(internalCluster().client(clusterManagerNode).admin().cluster().prepareHealth().get().isTimedOut());
assertTrue(
-internalCluster().client(masterNode)
+internalCluster().client(clusterManagerNode)
.admin()
.cluster()
.prepareHealth()


@@ -64,29 +64,29 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
* Test cluster join with issues in cluster state publishing *
*/
public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
-String masterNode = internalCluster().startMasterOnlyNode();
-String nonMasterNode = internalCluster().startDataOnlyNode();
+String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
+String nonClusterManagerNode = internalCluster().startDataOnlyNode();
-DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonMasterNode).state().nodes();
+DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonClusterManagerNode).state().nodes();
TransportService masterTranspotService = internalCluster().getInstance(
TransportService.class,
discoveryNodes.getMasterNode().getName()
);
-logger.info("blocking requests from non master [{}] to master [{}]", nonMasterNode, masterNode);
+logger.info("blocking requests from non master [{}] to master [{}]", nonClusterManagerNode, clusterManagerNode);
MockTransportService nonMasterTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class,
-nonMasterNode
+nonClusterManagerNode
);
nonMasterTransportService.addFailToSendNoConnectRule(masterTranspotService);
-assertNoMaster(nonMasterNode);
+assertNoMaster(nonClusterManagerNode);
-logger.info("blocking cluster state publishing from master [{}] to non master [{}]", masterNode, nonMasterNode);
+logger.info("blocking cluster state publishing from master [{}] to non master [{}]", clusterManagerNode, nonClusterManagerNode);
MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class,
-masterNode
+clusterManagerNode
);
TransportService localTransportService = internalCluster().getInstance(
TransportService.class,
@@ -98,7 +98,11 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
masterTransportService.addFailToSendNoConnectRule(localTransportService, PublicationTransportHandler.COMMIT_STATE_ACTION_NAME);
}
-logger.info("allowing requests from non master [{}] to master [{}], waiting for two join request", nonMasterNode, masterNode);
+logger.info(
+"allowing requests from non master [{}] to master [{}], waiting for two join request",
+nonClusterManagerNode,
+clusterManagerNode
+);
final CountDownLatch countDownLatch = new CountDownLatch(2);
nonMasterTransportService.addSendBehavior(masterTransportService, (connection, requestId, action, request, options) -> {
if (action.equals(JoinHelper.JOIN_ACTION_NAME)) {
@@ -197,31 +201,31 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
public void testNodeNotReachableFromMaster() throws Exception {
startCluster(3);
-String masterNode = internalCluster().getMasterName();
-String nonMasterNode = null;
-while (nonMasterNode == null) {
-nonMasterNode = randomFrom(internalCluster().getNodeNames());
-if (nonMasterNode.equals(masterNode)) {
-nonMasterNode = null;
+String clusterManagerNode = internalCluster().getMasterName();
+String nonClusterManagerNode = null;
+while (nonClusterManagerNode == null) {
+nonClusterManagerNode = randomFrom(internalCluster().getNodeNames());
+if (nonClusterManagerNode.equals(clusterManagerNode)) {
+nonClusterManagerNode = null;
}
}
-logger.info("blocking request from master [{}] to [{}]", masterNode, nonMasterNode);
+logger.info("blocking request from master [{}] to [{}]", clusterManagerNode, nonClusterManagerNode);
MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class,
-masterNode
+clusterManagerNode
);
if (randomBoolean()) {
-masterTransportService.addUnresponsiveRule(internalCluster().getInstance(TransportService.class, nonMasterNode));
+masterTransportService.addUnresponsiveRule(internalCluster().getInstance(TransportService.class, nonClusterManagerNode));
} else {
-masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, nonMasterNode));
+masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, nonClusterManagerNode));
}
-logger.info("waiting for [{}] to be removed from cluster", nonMasterNode);
-ensureStableCluster(2, masterNode);
+logger.info("waiting for [{}] to be removed from cluster", nonClusterManagerNode);
+ensureStableCluster(2, clusterManagerNode);
-logger.info("waiting for [{}] to have no cluster-manager", nonMasterNode);
-assertNoMaster(nonMasterNode);
+logger.info("waiting for [{}] to have no cluster-manager", nonClusterManagerNode);
+assertNoMaster(nonClusterManagerNode);
logger.info("healing partition and checking cluster reforms");
masterTransportService.clearAllRules();


@@ -71,33 +71,40 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {
public void testMasterNodeGCs() throws Exception {
List<String> nodes = startCluster(3);
-String oldMasterNode = internalCluster().getMasterName();
+String oldClusterManagerNode = internalCluster().getMasterName();
// a very long GC, but it's OK as we remove the disruption when it has had an effect
-SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(random(), oldMasterNode, 100, 200, 30000, 60000);
+SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(
+random(),
+oldClusterManagerNode,
+100,
+200,
+30000,
+60000
+);
internalCluster().setDisruptionScheme(masterNodeDisruption);
masterNodeDisruption.startDisrupting();
-Set<String> oldNonMasterNodesSet = new HashSet<>(nodes);
-oldNonMasterNodesSet.remove(oldMasterNode);
+Set<String> oldNonClusterManagerNodesSet = new HashSet<>(nodes);
+oldNonClusterManagerNodesSet.remove(oldClusterManagerNode);
-List<String> oldNonMasterNodes = new ArrayList<>(oldNonMasterNodesSet);
+List<String> oldNonClusterManagerNodes = new ArrayList<>(oldNonClusterManagerNodesSet);
-logger.info("waiting for nodes to de-elect master [{}]", oldMasterNode);
-for (String node : oldNonMasterNodesSet) {
-assertDifferentMaster(node, oldMasterNode);
+logger.info("waiting for nodes to de-elect master [{}]", oldClusterManagerNode);
+for (String node : oldNonClusterManagerNodesSet) {
+assertDifferentMaster(node, oldClusterManagerNode);
}
logger.info("waiting for nodes to elect a new master");
-ensureStableCluster(2, oldNonMasterNodes.get(0));
+ensureStableCluster(2, oldNonClusterManagerNodes.get(0));
// restore GC
masterNodeDisruption.stopDisrupting();
final TimeValue waitTime = new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis());
-ensureStableCluster(3, waitTime, false, oldNonMasterNodes.get(0));
+ensureStableCluster(3, waitTime, false, oldNonClusterManagerNodes.get(0));
// make sure all nodes agree on master
String newMaster = internalCluster().getMasterName();
-assertThat(newMaster, not(equalTo(oldMasterNode)));
+assertThat(newMaster, not(equalTo(oldClusterManagerNode)));
assertMaster(newMaster, nodes);
}

View File

@ -73,8 +73,8 @@ import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.equalTo;
/**
* Tests relating to the loss of the master, but which work with the default fault detection settings which are rather lenient and will
* not detect a master failure too quickly.
* Tests relating to the loss of the cluster-manager, but which work with the default fault detection settings which are rather lenient and will
* not detect a cluster-manager failure too quickly.
*/
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class StableMasterDisruptionIT extends OpenSearchIntegTestCase {
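The javadoc above notes that the default fault-detection settings are lenient. A hedged sketch of how a test could tighten them; the setting keys are assumptions about the Zen2 fault-detection module and are not part of this change:

    Settings fastFaultDetection = Settings.builder()
        .put("cluster.fault_detection.leader_check.timeout", "1s")    // followers checking the leader
        .put("cluster.fault_detection.leader_check.retry_count", 1)
        .put("cluster.fault_detection.follower_check.timeout", "1s")  // leader checking its followers
        .put("cluster.fault_detection.follower_check.retry_count", 1)
        .build();
    internalCluster().startNodes(3, fastFaultDetection);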
@ -228,9 +228,9 @@ public class StableMasterDisruptionIT extends OpenSearchIntegTestCase {
event.state(),
event.previousState()
);
String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null;
String previousClusterManagerNodeName = previousMaster != null ? previousMaster.getName() : null;
String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
masters.get(node).add(new Tuple<>(previousClusterManagerNodeName, currentMasterNodeName));
}
});
}

View File

@ -56,7 +56,7 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
final String indexName = "test-repurpose";
logger.info("--> starting two nodes");
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(
Settings.builder().put(IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), false).build()
);
@ -71,20 +71,20 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
assertTrue(client().prepareGet(indexName, "1").get().isExists());
final Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
final Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
final Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
final Settings noMasterNoDataSettings = NodeRoles.removeRoles(
final Settings noClusterManagerNoDataSettings = NodeRoles.removeRoles(
Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE)))
);
final Settings noMasterNoDataSettingsForMasterNode = Settings.builder()
.put(noMasterNoDataSettings)
.put(masterNodeDataPathSettings)
final Settings noClusterManagerNoDataSettingsForClusterManagerNode = Settings.builder()
.put(noClusterManagerNoDataSettings)
.put(clusterManagerNodeDataPathSettings)
.build();
final Settings noMasterNoDataSettingsForDataNode = Settings.builder()
.put(noMasterNoDataSettings)
final Settings noClusterManagerNoDataSettingsForDataNode = Settings.builder()
.put(noClusterManagerNoDataSettings)
.put(dataNodeDataPathSettings)
.build();
@ -99,11 +99,11 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
);
logger.info("--> Repurposing node 1");
executeRepurposeCommand(noMasterNoDataSettingsForDataNode, 1, 1);
executeRepurposeCommand(noClusterManagerNoDataSettingsForDataNode, 1, 1);
OpenSearchException lockedException = expectThrows(
OpenSearchException.class,
() -> executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, 1, 1)
() -> executeRepurposeCommand(noClusterManagerNoDataSettingsForClusterManagerNode, 1, 1)
);
assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG));
@ -119,11 +119,11 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
internalCluster().stopRandomNode(s -> true);
internalCluster().stopRandomNode(s -> true);
executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, 1, 0);
executeRepurposeCommand(noClusterManagerNoDataSettingsForClusterManagerNode, 1, 0);
// by restarting as master and data node, we can check that the index definition was really deleted and also that the tool
// does not mess things up so much that the nodes cannot boot as master or data node any longer.
internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings);
internalCluster().startDataOnlyNode(dataNodeDataPathSettings);
ensureGreen();

View File

@ -277,7 +277,7 @@ public class GatewayIndexStateIT extends OpenSearchIntegTestCase {
logger.info("--> cleaning nodes");
logger.info("--> starting 1 master node non data");
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
logger.info("--> create an index");

View File

@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.equalTo;
public class MetadataNodesIT extends OpenSearchIntegTestCase {
public void testMetaWrittenAlsoOnDataNode() throws Exception {
// this test checks that index state is written on data only nodes if they have a shard allocated
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)));
index("test", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
@ -71,7 +71,7 @@ public class MetadataNodesIT extends OpenSearchIntegTestCase {
public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Exception {
// this test checks that the index data is removed from a data only node once all shards have been allocated away from it
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
List<String> nodeNames = internalCluster().startDataOnlyNodes(2);
String node1 = nodeNames.get(0);
String node2 = nodeNames.get(1);
@ -114,7 +114,7 @@ public class MetadataNodesIT extends OpenSearchIntegTestCase {
@SuppressWarnings("unchecked")
public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
final String index = "index";

View File

@ -44,7 +44,7 @@ import org.opensearch.test.OpenSearchIntegTestCase.Scope;
import java.util.Set;
import static org.opensearch.test.NodeRoles.dataOnlyNode;
import static org.opensearch.test.NodeRoles.masterOnlyNode;
import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
@ -75,7 +75,7 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
}
public void testRecoverAfterNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0);
internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start node (1)");
Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3));
assertThat(
@ -128,9 +128,9 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
}
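For orientation, the gateway.recover_after_* settings used in these tests gate recovery behind a global cluster block. A minimal sketch of the check, assuming GatewayService.STATE_NOT_RECOVERED_BLOCK is the block applied until enough nodes have joined (names taken from the hunk above):

    ClusterBlocks blocks = clientNode1.admin().cluster().prepareState().setLocal(true)
        .execute().actionGet().getState().blocks();
    // the block is cleared once gateway.recover_after_nodes (here 3) nodes have joined
    assertThat(blocks.global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));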
public void testRecoverAfterMasterNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0);
internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start master_node (1)");
Client master1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(masterOnlyNode()));
Client master1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode()));
assertThat(
master1.admin()
.cluster()
@ -211,7 +211,7 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
);
logger.info("--> start master_node (2)");
Client master2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(masterOnlyNode()));
Client master2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode()));
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
@ -219,9 +219,9 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
}
public void testRecoverAfterDataNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0);
internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start master_node (1)");
Client master1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(masterOnlyNode()));
Client master1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode()));
assertThat(
master1.admin()
.cluster()
@ -263,7 +263,7 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
);
logger.info("--> start master_node (2)");
Client master2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(masterOnlyNode()));
Client master2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode()));
assertThat(
master2.admin()
.cluster()

View File

@ -512,7 +512,7 @@ public class RecoveryFromGatewayIT extends OpenSearchIntegTestCase {
}
public void testReuseInFileBasedPeerRecovery() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String primaryNode = internalCluster().startDataOnlyNode(nodeSettings(0));
// create the index with our mapping

View File

@ -327,7 +327,7 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
public void testPreferCopyWithHighestMatchingOperations() throws Exception {
String indexName = "test";
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(3);
assertAcked(
client().admin()
@ -402,7 +402,7 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
* Make sure that we do not repeatedly cancel an ongoing recovery for a noop copy on a broken node.
*/
public void testDoNotCancelRecoveryForBrokenNode() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
String nodeWithPrimary = internalCluster().startDataOnlyNode();
String indexName = "test";
assertAcked(
@ -518,7 +518,7 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
* this behavior by changing the global checkpoint in phase1 to unassigned.
*/
public void testSimulateRecoverySourceOnOldNode() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
String source = internalCluster().startDataOnlyNode();
String indexName = "test";
assertAcked(

View File

@ -63,7 +63,7 @@ public class PeerRecoveryRetentionLeaseCreationIT extends OpenSearchIntegTestCas
* primary that is recovering from store creates a lease for itself.
*/
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final Path[] nodeDataPaths = internalCluster().getInstance(NodeEnvironment.class, dataNode).nodeDataPaths();
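The lease the javadoc refers to is keyed by the node id. A hedged sketch of the assertion such a test can make; the id scheme via ReplicationTracker.getPeerRecoveryRetentionLeaseId and the index name "index" are assumptions, not part of this diff:

    String nodeId = internalCluster().clusterService(dataNode).localNode().getId();
    IndexShard shard = internalCluster().getInstance(IndicesService.class, dataNode)
        .indexServiceSafe(resolveIndex("index")).getShard(0);
    // a primary recovered from store holds a peer-recovery retention lease for itself
    assertTrue(shard.getRetentionLeases().contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(nodeId)));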

View File

@ -497,7 +497,7 @@ public class RemoveCorruptedShardDataCommandIT extends OpenSearchIntegTestCase {
}
public void testCorruptTranslogTruncationOfReplica() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String node1 = internalCluster().startDataOnlyNode();
final String node2 = internalCluster().startDataOnlyNode();

View File

@ -42,7 +42,7 @@ public class DedicatedMasterGetFieldMappingIT extends SimpleGetFieldMappingsIT {
@Before
public void before1() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startNode();
}

View File

@ -1212,7 +1212,7 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
.build();
TimeValue disconnectAfterDelay = TimeValue.timeValueMillis(randomIntBetween(0, 100));
// start a master node
String masterNodeName = internalCluster().startMasterOnlyNode(nodeSettings);
String masterNodeName = internalCluster().startClusterManagerOnlyNode(nodeSettings);
final String blueNodeName = internalCluster().startNode(
Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build()
@ -2085,7 +2085,7 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
}
public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception {
internalCluster().startMasterOnlyNode(Settings.EMPTY);
internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final Settings randomNodeDataPathSettings = internalCluster().dataPathSettings(randomFrom(dataNodes));
final String indexName = "test";
@ -2185,7 +2185,7 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
}
public void testCancelRecoveryWithAutoExpandReplicas() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
assertAcked(
client().admin()
.indices()

View File

@ -345,7 +345,7 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
}
public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final List<String> nodes = internalCluster().startDataOnlyNodes(4);
final String node1 = nodes.get(0);
@ -459,11 +459,11 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
public void testShardActiveElseWhere() throws Exception {
List<String> nodes = internalCluster().startNodes(2);
final String masterNode = internalCluster().getMasterName();
final String nonMasterNode = nodes.get(0).equals(masterNode) ? nodes.get(1) : nodes.get(0);
final String clusterManagerNode = internalCluster().getMasterName();
final String nonClusterManagerNode = nodes.get(0).equals(clusterManagerNode) ? nodes.get(1) : nodes.get(0);
final String masterId = internalCluster().clusterService(masterNode).localNode().getId();
final String nonMasterId = internalCluster().clusterService(nonMasterNode).localNode().getId();
final String clusterManagerId = internalCluster().clusterService(clusterManagerNode).localNode().getId();
final String nonClusterManagerId = internalCluster().clusterService(nonClusterManagerNode).localNode().getId();
final int numShards = scaledRandomIntBetween(2, 10);
assertAcked(
@ -476,14 +476,14 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
waitNoPendingTasksOnAll();
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
final Index index = stateResponse.getState().metadata().index("test").getIndex();
RoutingNode routingNode = stateResponse.getState().getRoutingNodes().node(nonMasterId);
RoutingNode routingNode = stateResponse.getState().getRoutingNodes().node(nonClusterManagerId);
final int[] node2Shards = new int[routingNode.numberOfOwningShards()];
int i = 0;
for (ShardRouting shardRouting : routingNode) {
node2Shards[i] = shardRouting.shardId().id();
i++;
}
logger.info("Node [{}] has shards: {}", nonMasterNode, Arrays.toString(node2Shards));
logger.info("Node [{}] has shards: {}", nonClusterManagerNode, Arrays.toString(node2Shards));
// disable relocations when we do this, to make sure the shards are not relocated from node2
// due to rebalancing, and delete its content
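A hedged sketch of that disable-relocations step; the concrete setting key is an assumption about the call elided here:

    client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder()
            // stop the balancer so shards stay on nonClusterManagerNode while the test deletes their files
            .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"))
        .get();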
@ -496,14 +496,14 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
)
.get();
ClusterApplierService clusterApplierService = internalCluster().getInstance(ClusterService.class, nonMasterNode)
ClusterApplierService clusterApplierService = internalCluster().getInstance(ClusterService.class, nonClusterManagerNode)
.getClusterApplierService();
ClusterState currentState = clusterApplierService.state();
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index);
for (int j = 0; j < numShards; j++) {
indexRoutingTableBuilder.addIndexShard(
new IndexShardRoutingTable.Builder(new ShardId(index, j)).addShard(
TestShardRouting.newShardRouting("test", j, masterId, true, ShardRoutingState.STARTED)
TestShardRouting.newShardRouting("test", j, clusterManagerId, true, ShardRoutingState.STARTED)
).build()
);
}
@ -528,7 +528,7 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
waitNoPendingTasksOnAll();
logger.info("Checking if shards aren't removed");
for (int shard : node2Shards) {
assertShardExists(nonMasterNode, index, shard);
assertShardExists(nonClusterManagerNode, index, shard);
}
}

View File

@ -302,8 +302,8 @@ public class IngestClientIT extends OpenSearchIntegTestCase {
assertFalse(response.isFound());
}
public void testWithDedicatedMaster() throws Exception {
String masterOnlyNode = internalCluster().startMasterOnlyNode();
public void testWithDedicatedClusterManager() throws Exception {
String clusterManagerOnlyNode = internalCluster().startClusterManagerOnlyNode();
BytesReference source = BytesReference.bytes(
jsonBuilder().startObject()
.field("description", "my_pipeline")
@ -318,7 +318,7 @@ public class IngestClientIT extends OpenSearchIntegTestCase {
PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON);
client().admin().cluster().putPipeline(putPipelineRequest).get();
BulkItemResponse item = client(masterOnlyNode).prepareBulk()
BulkItemResponse item = client(clusterManagerOnlyNode).prepareBulk()
.add(client().prepareIndex("test").setSource("field", "value2", "drop", true).setPipeline("_id"))
.get()
.getItems()[0];

View File

@ -196,7 +196,7 @@ public class FullRollingRestartIT extends OpenSearchIntegTestCase {
public void testNoRebalanceOnRollingRestart() throws Exception {
// see https://github.com/elastic/elasticsearch/issues/14387
internalCluster().startMasterOnlyNode(Settings.EMPTY);
internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(3);
/**
* We start 3 nodes and a dedicated master. Restart one of the data-nodes and ensure that we get no relocations.

View File

@ -59,7 +59,7 @@ import static org.hamcrest.Matchers.is;
public class AbortedRestoreIT extends AbstractSnapshotIntegTestCase {
public void testAbortedRestoreAlsoAbortFileRestores() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String indexName = "test-abort-restore";

View File

@ -59,7 +59,7 @@ import static org.hamcrest.Matchers.is;
public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
public void testIncrementalBehaviorOnPrimaryFailover() throws InterruptedException, ExecutionException, IOException {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String primaryNode = internalCluster().startDataOnlyNode();
final String indexName = "test-index";
createIndex(
@ -148,7 +148,7 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
}
public void testForceMergeCausesFullSnapshot() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().ensureAtLeastNumDataNodes(2);
final String indexName = "test-index";
createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build());

View File

@ -72,7 +72,7 @@ import static org.hamcrest.Matchers.is;
public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
public void testShardClone() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "repo-name";
final Path repoPath = randomRepoPath();
@ -143,7 +143,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testCloneSnapshotIndex() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "repo-name";
createRepository(repoName, "fs");
@ -172,7 +172,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testClonePreventsSnapshotDelete() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "repo-name";
createRepository(repoName, "mock");
@ -185,9 +185,9 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
indexRandomDocs(indexName, randomIntBetween(20, 100));
final String targetSnapshot = "target-snapshot";
blockNodeOnAnyFiles(repoName, masterName);
blockNodeOnAnyFiles(repoName, clusterManagerName);
final ActionFuture<AcknowledgedResponse> cloneFuture = startClone(repoName, sourceSnapshot, targetSnapshot, indexName);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
assertFalse(cloneFuture.isDone());
ConcurrentSnapshotExecutionException ex = expectThrows(
@ -196,7 +196,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
);
assertThat(ex.getMessage(), containsString("cannot delete snapshot while it is being cloned"));
unblockNode(repoName, masterName);
unblockNode(repoName, clusterManagerName);
assertAcked(cloneFuture.get());
final List<SnapshotStatus> status = clusterAdmin().prepareSnapshotStatus(repoName)
.setSnapshots(sourceSnapshot, targetSnapshot)
@ -210,7 +210,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testConcurrentCloneAndSnapshot() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "repo-name";
createRepository(repoName, "mock");
@ -234,7 +234,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
public void testLongRunningCloneAllowsConcurrentSnapshot() throws Exception {
// large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations
final String masterNode = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
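LARGE_SNAPSHOT_POOL_SETTINGS is defined in the snapshot test base class; sketched here for orientation, with the pool sizes being assumptions:

    // assumed shape: enlarge the snapshot thread pool so deliberately blocked
    // snapshot threads still leave capacity for finalization work
    Settings LARGE_SNAPSHOT_POOL_SETTINGS = Settings.builder()
        .put("thread_pool.snapshot.core", 5)
        .put("thread_pool.snapshot.max", 5)
        .build();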
@ -263,7 +263,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testLongRunningSnapshotAllowsConcurrentClone() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -293,7 +293,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testDeletePreventsClone() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "repo-name";
createRepository(repoName, "mock");
@ -306,9 +306,9 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
indexRandomDocs(indexName, randomIntBetween(20, 100));
final String targetSnapshot = "target-snapshot";
blockNodeOnAnyFiles(repoName, masterName);
blockNodeOnAnyFiles(repoName, clusterManagerName);
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, sourceSnapshot);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
assertFalse(deleteFuture.isDone());
ConcurrentSnapshotExecutionException ex = expectThrows(
@ -317,13 +317,13 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
);
assertThat(ex.getMessage(), containsString("cannot clone from snapshot that is being deleted"));
unblockNode(repoName, masterName);
unblockNode(repoName, clusterManagerName);
assertAcked(deleteFuture.get());
}
public void testBackToBackClonesForIndexNotInCluster() throws Exception {
// large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations
final String masterNode = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -413,7 +413,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testFailsOnCloneMissingIndices() {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "repo-name";
final Path repoPath = randomRepoPath();
@ -481,7 +481,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -516,7 +516,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
final String masterName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -546,7 +546,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -560,14 +560,14 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
blockMasterOnWriteIndexFile(repoName);
final String cloneName = "clone-blocked";
final ActionFuture<AcknowledgedResponse> blockedClone = startClone(repoName, sourceSnapshot, cloneName, indexName);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
awaitNumberOfSnapshotsInProgress(1);
final String otherCloneName = "other-clone";
final ActionFuture<AcknowledgedResponse> otherClone = startClone(repoName, sourceSnapshot, otherCloneName, indexName);
awaitNumberOfSnapshotsInProgress(2);
assertFalse(blockedClone.isDone());
unblockNode(repoName, masterName);
awaitNoMoreRunningOperations(masterName);
unblockNode(repoName, clusterManagerName);
awaitNoMoreRunningOperations(clusterManagerName);
awaitMasterFinishRepoOperations();
assertAcked(blockedClone.get());
assertAcked(otherClone.get());
@ -576,7 +576,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
}
public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
final String masterName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");

View File

@ -106,7 +106,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testLongRunningSnapshotAllowsConcurrentSnapshot() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -139,7 +139,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testDeletesAreBatched() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -205,7 +205,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String blockedRepoName = "test-repo-blocked";
final String otherRepoName = "test-repo";
@ -231,7 +231,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testMultipleReposAreIndependent() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
// We're blocking some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second concurrent snapshot.
final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -255,7 +255,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testMultipleReposAreIndependent2() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
// We're blocking some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second repository's concurrent operations.
final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -280,7 +280,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testMultipleReposAreIndependent3() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode();
final String blockedRepoName = "test-repo-blocked";
final String otherRepoName = "test-repo";
@ -301,7 +301,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testSnapshotRunsAfterInProgressDelete() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -314,11 +314,11 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, firstSnapshot);
waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, "second-snapshot");
unblockNode(repoName, masterNode);
unblockNode(repoName, clusterManagerNode);
final UncategorizedExecutionException ex = expectThrows(UncategorizedExecutionException.class, deleteFuture::actionGet);
assertThat(ex.getRootCause(), instanceOf(IOException.class));
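Most ConcurrentSnapshotsIT cases repeat one block/unblock skeleton; it is sketched once here with the renamed variable, using only calls that appear in the hunks above:

    blockMasterFromFinalizingSnapshotOnIndexFile(repoName);                      // arm a block on the cluster-manager's repo
    ActionFuture<AcknowledgedResponse> pending = startDeleteSnapshot(repoName, "snap");
    waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); // the operation is now hung on the block
    unblockNode(repoName, clusterManagerNode);                                   // release it
    assertAcked(pending.get());                                                  // the queued operation completes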
@ -326,7 +326,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testAbortOneOfMultipleSnapshots() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -386,7 +386,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testCascadedAborts() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -540,7 +540,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -571,7 +571,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testQueuedDeletesWithFailures() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -580,7 +580,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
final ActionFuture<AcknowledgedResponse> firstDeleteFuture = startDeleteSnapshot(repoName, "*");
waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, "snapshot-queued");
awaitNumberOfSnapshotsInProgress(1);
@ -588,7 +588,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<AcknowledgedResponse> secondDeleteFuture = startDeleteSnapshot(repoName, "*");
awaitNDeletionsInProgress(2);
unblockNode(repoName, masterNode);
unblockNode(repoName, clusterManagerNode);
expectThrows(UncategorizedExecutionException.class, firstDeleteFuture::actionGet);
// Second delete works out cleanly since the repo is unblocked now
@ -601,7 +601,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testQueuedDeletesWithOverlap() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -615,7 +615,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<AcknowledgedResponse> secondDeleteFuture = startDeleteSnapshot(repoName, "*");
awaitNDeletionsInProgress(2);
unblockNode(repoName, masterNode);
unblockNode(repoName, clusterManagerNode);
assertThat(firstDeleteFuture.get().isAcknowledged(), is(true));
// Second delete works out cleanly since the repo is unblocked now
@ -878,7 +878,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testMultipleSnapshotsQueuedAfterDelete() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -889,7 +889,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<CreateSnapshotResponse> snapshotThree = startFullSnapshot(repoName, "snapshot-three");
final ActionFuture<CreateSnapshotResponse> snapshotFour = startFullSnapshot(repoName, "snapshot-four");
unblockNode(repoName, masterNode);
unblockNode(repoName, clusterManagerNode);
assertSuccessful(snapshotThree);
assertSuccessful(snapshotFour);
@ -897,7 +897,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testMultiplePartialSnapshotsQueuedAfterDelete() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -911,7 +911,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
awaitNumberOfSnapshotsInProgress(2);
assertAcked(client().admin().indices().prepareDelete("index-two"));
unblockNode(repoName, masterNode);
unblockNode(repoName, clusterManagerNode);
assertThat(snapshotThree.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL));
assertThat(snapshotFour.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL));
@ -919,7 +919,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testQueuedSnapshotsWaitingForShardReady() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(2);
final String repoName = "test-repo";
createRepository(repoName, "fs");
@ -977,7 +977,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testBackToBackQueuedDeletes() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -990,7 +990,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<AcknowledgedResponse> deleteSnapshotTwo = startDeleteSnapshot(repoName, snapshotTwo);
awaitNDeletionsInProgress(2);
unblockNode(repoName, masterName);
unblockNode(repoName, clusterManagerName);
assertAcked(deleteSnapshotOne.get());
assertAcked(deleteSnapshotTwo.get());
@ -1024,7 +1024,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testStartDeleteDuringFinalizationCleanup() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -1033,35 +1033,35 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final String snapshotName = "snap-name";
blockMasterFromDeletingIndexNFile(repoName);
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, snapshotName);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, snapshotName);
awaitNDeletionsInProgress(1);
unblockNode(repoName, masterName);
unblockNode(repoName, clusterManagerName);
assertSuccessful(snapshotFuture);
assertAcked(deleteFuture.get(30L, TimeUnit.SECONDS));
}
public void testEquivalentDeletesAreDeduplicated() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
createIndexWithContent("index-test");
createNSnapshots(repoName, randomIntBetween(1, 5));
blockNodeOnAnyFiles(repoName, masterName);
blockNodeOnAnyFiles(repoName, clusterManagerName);
final int deletes = randomIntBetween(2, 10);
final List<ActionFuture<AcknowledgedResponse>> deleteResponses = new ArrayList<>(deletes);
for (int i = 0; i < deletes; ++i) {
deleteResponses.add(client().admin().cluster().prepareDeleteSnapshot(repoName, "*").execute());
}
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
awaitNDeletionsInProgress(1);
for (ActionFuture<AcknowledgedResponse> deleteResponse : deleteResponses) {
assertFalse(deleteResponse.isDone());
}
awaitNDeletionsInProgress(1);
unblockNode(repoName, masterName);
unblockNode(repoName, clusterManagerName);
for (ActionFuture<AcknowledgedResponse> deleteResponse : deleteResponses) {
assertAcked(deleteResponse.get());
}
@ -1102,7 +1102,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testStatusMultipleSnapshotsMultipleRepos() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
// We're blocking some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second concurrent snapshot.
final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -1146,7 +1146,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testInterleavedAcrossMultipleRepos() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
// We're blocking some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second concurrent snapshot.
final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -1219,7 +1219,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testConcurrentOperationsLimit() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -1237,7 +1237,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
);
final List<String> snapshotNames = createNSnapshots(repoName, limitToTest + 1);
blockNodeOnAnyFiles(repoName, masterName);
blockNodeOnAnyFiles(repoName, clusterManagerName);
int blockedSnapshots = 0;
boolean blockedDelete = false;
final List<ActionFuture<CreateSnapshotResponse>> snapshotFutures = new ArrayList<>();
@ -1255,7 +1255,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
if (blockedDelete) {
awaitNDeletionsInProgress(1);
}
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
final String expectedFailureMessage = "Cannot start another operation, already running ["
+ limitToTest
@ -1275,7 +1275,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
assertThat(csen2.getMessage(), containsString(expectedFailureMessage));
}
unblockNode(repoName, masterName);
unblockNode(repoName, clusterManagerName);
if (deleteFuture != null) {
assertAcked(deleteFuture.get());
}
@ -1285,7 +1285,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testConcurrentSnapshotWorksWithOldVersionRepo() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
final Path repoPath = randomRepoPath();
@ -1322,23 +1322,23 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testQueuedDeleteAfterFinalizationFailure() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
final String snapshotName = "snap-1";
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, snapshotName);
waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, snapshotName);
awaitNDeletionsInProgress(1);
unblockNode(repoName, masterNode);
unblockNode(repoName, clusterManagerNode);
assertAcked(deleteFuture.get());
final SnapshotException sne = expectThrows(SnapshotException.class, snapshotFuture::actionGet);
assertThat(sne.getCause().getMessage(), containsString("exception after block"));
}
public void testAbortNotStartedSnapshotWithoutIO() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -1365,7 +1365,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
public void testStartWithSuccessfulShardSnapshotPendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode();
final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "mock");
@ -1375,13 +1375,13 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
blockMasterOnWriteIndexFile(repoName);
final ActionFuture<CreateSnapshotResponse> blockedSnapshot = startFullSnapshot(repoName, "snap-blocked");
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L));
waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
awaitNumberOfSnapshotsInProgress(1);
blockNodeOnAnyFiles(repoName, dataNode);
final ActionFuture<CreateSnapshotResponse> otherSnapshot = startFullSnapshot(repoName, "other-snapshot");
awaitNumberOfSnapshotsInProgress(2);
assertFalse(blockedSnapshot.isDone());
unblockNode(repoName, masterName);
unblockNode(repoName, clusterManagerName);
awaitNumberOfSnapshotsInProgress(1);
awaitMasterFinishRepoOperations();

View File

@ -926,7 +926,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
*/
public void testRestoreShrinkIndex() throws Exception {
logger.info("--> starting a cluster-manager node and a data node");
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repo = "test-repo";
@ -1145,7 +1145,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception {
logger.info("--> starting a cluster-manager node and two data nodes");
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(2);
final Path repoPath = randomRepoPath();
createRepository("test-repo", "mock", repoPath);
@ -1201,7 +1201,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception {
logger.info("--> starting a cluster-manager node and two data nodes");
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final Path repoPath = randomRepoPath();
createRepository("test-repo", "mock", repoPath);
@ -1323,7 +1323,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
}
public void testAbortWaitsOnDataNode() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNodeName = internalCluster().startDataOnlyNode();
final String indexName = "test-index";
createIndex(indexName);
@ -1375,7 +1375,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
}
public void testPartialSnapshotAllShardsMissing() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "fs");
@ -1393,7 +1393,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
* correctly by testing a snapshot name collision.
*/
public void testCreateSnapshotLegacyPath() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode();
final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoName = "test-repo";
createRepository(repoName, "fs");
@ -1403,7 +1403,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
final Snapshot snapshot1 = PlainActionFuture.get(
f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f)
);
awaitNoMoreRunningOperations(masterNode);
awaitNoMoreRunningOperations(clusterManagerNode);
final InvalidSnapshotNameException sne = expectThrows(
InvalidSnapshotNameException.class,
@ -1425,7 +1425,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
}
public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final String repoName = "test-repo";
createRepository(repoName, "fs");

View File

@ -107,13 +107,13 @@ public class MultiClusterRepoAccessIT extends AbstractSnapshotIntegTestCase {
}
public void testConcurrentDeleteFromOtherCluster() throws InterruptedException {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
final String repoNameOnFirstCluster = "test-repo";
final String repoNameOnSecondCluster = randomBoolean() ? "test-repo" : "other-repo";
createRepository(repoNameOnFirstCluster, "fs", repoPath);
secondCluster.startMasterOnlyNode();
secondCluster.startClusterManagerOnlyNode();
secondCluster.startDataOnlyNode();
secondCluster.client()
.admin()

View File

@ -60,7 +60,7 @@ public class SnapshotShardsServiceIT extends AbstractSnapshotIntegTestCase {
}
public void testRetryPostingSnapshotStatusMessages() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
createRepository("test-repo", "mock");

View File

@ -83,7 +83,7 @@ public class DelayedAllocationServiceTests extends OpenSearchAllocationTestCase
threadPool = new TestThreadPool(getTestName());
clusterService = mock(ClusterService.class);
allocationService = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
when(clusterService.getSettings()).thenReturn(NodeRoles.masterOnlyNode());
when(clusterService.getSettings()).thenReturn(NodeRoles.clusterManagerOnlyNode());
delayedAllocationService = new TestDelayAllocationService(threadPool, clusterService, allocationService);
verify(clusterService).addListener(delayedAllocationService);
verify(clusterService).getSettings();

View File

@ -737,7 +737,7 @@ public class AllocationCommandsTests extends OpenSearchAllocationTestCase {
"test1",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
);
DiscoveryNode node2 = new DiscoveryNode(
@ -808,7 +808,7 @@ public class AllocationCommandsTests extends OpenSearchAllocationTestCase {
"test1",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
);
DiscoveryNode node2 = new DiscoveryNode(

View File

@ -338,21 +338,21 @@ public class NodeVersionAllocationDeciderTests extends OpenSearchAllocationTestC
"newNode",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
);
final DiscoveryNode oldNode1 = new DiscoveryNode(
"oldNode1",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion()
);
final DiscoveryNode oldNode2 = new DiscoveryNode(
"oldNode2",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion()
);
AllocationId allocationId1P = AllocationId.newInitializing();
@ -457,21 +457,21 @@ public class NodeVersionAllocationDeciderTests extends OpenSearchAllocationTestC
"newNode",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
);
final DiscoveryNode oldNode1 = new DiscoveryNode(
"oldNode1",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion()
);
final DiscoveryNode oldNode2 = new DiscoveryNode(
"oldNode2",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion()
);

View File

@ -96,7 +96,7 @@ public class SameShardRoutingTests extends OpenSearchAllocationTestCase {
"test1",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
)
)
@ -109,7 +109,7 @@ public class SameShardRoutingTests extends OpenSearchAllocationTestCase {
"test1",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
)
)
@ -138,7 +138,7 @@ public class SameShardRoutingTests extends OpenSearchAllocationTestCase {
"test2",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
)
)

View File

@ -921,14 +921,14 @@ public class DiskThresholdDeciderTests extends OpenSearchAllocationTestCase {
"node1",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
);
DiscoveryNode discoveryNode2 = new DiscoveryNode(
"node2",
buildNewFakeTransportAddress(),
emptyMap(),
MASTER_DATA_ROLES,
CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT
);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
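CLUSTER_MANAGER_DATA_ROLES comes from the allocation test base class; its assumed definition, mirroring the role set spelled out earlier in this diff:

    protected static final Set<DiscoveryNodeRole> CLUSTER_MANAGER_DATA_ROLES = Collections.unmodifiableSet(
        new HashSet<>(Arrays.asList(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE))
    );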

View File

@ -181,8 +181,8 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTest
private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) {
Set<DiscoveryNodeRole> dataOnlyRoles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE);
return DiscoveryNodes.builder()
.add(newNode("node1", masterEligible ? MASTER_DATA_ROLES : dataOnlyRoles))
.add(newNode("master_node", MASTER_DATA_ROLES))
.add(newNode("node1", masterEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles))
.add(newNode("master_node", CLUSTER_MANAGER_DATA_ROLES))
.localNodeId("node1")
.masterNodeId(masterEligible ? "node1" : "master_node");
}
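A hedged usage sketch of the builder above: with a local node that is not cluster-manager-eligible, the dedicated entry must hold the cluster-manager id.

    DiscoveryNodes nodes = generateDiscoveryNodes(false).build();
    assertThat(nodes.getMasterNodeId(), equalTo("master_node")); // local "node1" is data-only here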

View File

@ -62,7 +62,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import static org.opensearch.test.NodeRoles.masterOnlyNode;
import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
import static org.opensearch.test.NodeRoles.nonMasterNode;
import static org.opensearch.test.NodeRoles.removeRoles;
import static org.hamcrest.Matchers.either;
@ -553,7 +553,7 @@ public class RemoteClusterServiceTests extends OpenSearchTestCase {
final Settings settings = Settings.EMPTY;
final List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
final Settings data = nonMasterNode();
final Settings dedicatedMaster = masterOnlyNode();
final Settings dedicatedMaster = clusterManagerOnlyNode();
try (
MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data);

View File

@ -97,7 +97,7 @@ public class ClusterStateCreationUtils {
numberOfNodes++;
}
}
numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures
numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local cluster-manager to test shard failures
final ShardId shardId = new ShardId(index, "_na_", 0);
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
Set<String> unassignedNodes = new HashSet<>();
@ -107,7 +107,7 @@ public class ClusterStateCreationUtils {
unassignedNodes.add(node.getId());
}
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures
final int primaryTerm = 1 + randomInt(200);
IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings(
@ -284,14 +284,14 @@ public class ClusterStateCreationUtils {
*/
public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index, int numberOfShards) {
int numberOfNodes = 2; // we need a non-local master to test shard failures
int numberOfNodes = 2; // we need a non-local cluster-manager to test shard failures
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
for (int i = 0; i < numberOfNodes + 1; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.add(node);
}
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures
IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings(
Settings.builder()
@ -426,20 +426,20 @@ public class ClusterStateCreationUtils {
}
/**
* Creates a cluster state where local node and master node can be specified
* Creates a cluster state where local node and cluster-manager node can be specified
*
* @param localNode node in allNodes that is the local node
* @param masterNode node in allNodes that is the master node. Can be null if no cluster-manager exists
* @param clusterManagerNode node in allNodes that is the cluster-manager node. Can be null if no cluster-manager exists
* @param allNodes all nodes in the cluster
* @return cluster state
*/
public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) {
public static ClusterState state(DiscoveryNode localNode, DiscoveryNode clusterManagerNode, DiscoveryNode... allNodes) {
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
for (DiscoveryNode node : allNodes) {
discoBuilder.add(node);
}
if (masterNode != null) {
discoBuilder.masterNodeId(masterNode.getId());
if (clusterManagerNode != null) {
discoBuilder.masterNodeId(clusterManagerNode.getId());
}
discoBuilder.localNodeId(localNode.getId());
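As a usage sketch of the renamed helper (hypothetical test code, not part of the commit; the three-argument DiscoveryNode constructor is the same one used elsewhere in these utilities):

    DiscoveryNode local = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
    DiscoveryNode clusterManager = new DiscoveryNode("cm", buildNewFakeTransportAddress(), Version.CURRENT);
    // Request a state in which the second node is the elected cluster-manager;
    // passing null instead models a cluster with no elected cluster-manager.
    ClusterState state = ClusterStateCreationUtils.state(local, clusterManager, local, clusterManager);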

View File

@ -148,7 +148,7 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
return new AllocationDeciders(deciders);
}
protected static Set<DiscoveryNodeRole> MASTER_DATA_ROLES = Collections.unmodifiableSet(
protected static Set<DiscoveryNodeRole> CLUSTER_MANAGER_DATA_ROLES = Collections.unmodifiableSet(
new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE))
);
@ -157,11 +157,11 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
}
protected static DiscoveryNode newNode(String nodeName, String nodeId, Map<String, String> attributes) {
return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, MASTER_DATA_ROLES, Version.CURRENT);
return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT);
}
protected static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, MASTER_DATA_ROLES, Version.CURRENT);
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT);
}
protected static DiscoveryNode newNode(String nodeId, Set<DiscoveryNodeRole> roles) {
@ -169,7 +169,7 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
}
protected static DiscoveryNode newNode(String nodeId, Version version) {
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, version);
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), CLUSTER_MANAGER_DATA_ROLES, version);
}
protected static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {

View File

@ -217,13 +217,13 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
// Updating the cluster state involves up to 7 delays:
// 1. submit the task to the master service
// 1. submit the task to the cluster-manager service
// 2. send PublishRequest
// 3. receive PublishResponse
// 4. send ApplyCommitRequest
// 5. apply committed cluster state
// 6. receive ApplyCommitResponse
// 7. apply committed state on master (last one to apply cluster state)
// 7. apply committed state on cluster-manager (last one to apply cluster state)
public static final long DEFAULT_CLUSTER_STATE_UPDATE_DELAY = 7 * DEFAULT_DELAY_VARIABILITY;
private static final int ELECTION_RETRIES = 10;
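A worked illustration of the constant above, with assumed numbers (the real delay variability comes from the deterministic task queue):

    // If each simulated step may be delayed by up to 100 ms, the seven steps
    // enumerated above bound one cluster state update at 7 * 100 = 700 ms.
    long delayVariability = 100L;                        // assumed example value
    long clusterStateUpdateDelay = 7 * delayVariability; // 700 ms of simulated time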
@ -288,11 +288,11 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
this(initialNodeCount, true, Settings.EMPTY);
}
Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings) {
this(initialNodeCount, allNodesMasterEligible, nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info"));
Cluster(int initialNodeCount, boolean allNodesClusterManagerEligible, Settings nodeSettings) {
this(initialNodeCount, allNodesClusterManagerEligible, nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info"));
}
Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
Cluster(int initialNodeCount, boolean allNodesClusterManagerEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
this.nodeHealthService = nodeHealthService;
bigArrays = usually()
? BigArrays.NON_RECYCLING_INSTANCE
@ -301,29 +301,29 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
assertThat(initialNodeCount, greaterThan(0));
final Set<String> masterEligibleNodeIds = new HashSet<>(initialNodeCount);
final Set<String> clusterManagerEligibleNodeIds = new HashSet<>(initialNodeCount);
clusterNodes = new ArrayList<>(initialNodeCount);
for (int i = 0; i < initialNodeCount; i++) {
final ClusterNode clusterNode = new ClusterNode(
nextNodeIndex.getAndIncrement(),
allNodesMasterEligible || i == 0 || randomBoolean(),
allNodesClusterManagerEligible || i == 0 || randomBoolean(),
nodeSettings,
nodeHealthService
);
clusterNodes.add(clusterNode);
if (clusterNode.getLocalNode().isMasterNode()) {
masterEligibleNodeIds.add(clusterNode.getId());
clusterManagerEligibleNodeIds.add(clusterNode.getId());
}
}
initialConfiguration = new VotingConfiguration(
new HashSet<>(randomSubsetOf(randomIntBetween(1, masterEligibleNodeIds.size()), masterEligibleNodeIds))
new HashSet<>(randomSubsetOf(randomIntBetween(1, clusterManagerEligibleNodeIds.size()), clusterManagerEligibleNodeIds))
);
logger.info(
"--> creating cluster of {} nodes (cluster-manager-eligible nodes: {}) with initial configuration {}",
initialNodeCount,
masterEligibleNodeIds,
clusterManagerEligibleNodeIds,
initialConfiguration
);
}
@ -336,7 +336,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
addNodes(newNodesCount);
stabilise(
// The first pinging discovers the master
// The first pinging discovers the cluster-manager
defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING)
// One message delay to send a join
+ DEFAULT_DELAY_VARIABILITY
@ -557,7 +557,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final ClusterNode leader = getAnyLeader();
final long leaderTerm = leader.coordinator.getCurrentTerm();
final int pendingTaskCount = leader.masterService.getFakeMasterServicePendingTaskCount();
final int pendingTaskCount = leader.clusterManagerService.getFakeMasterServicePendingTaskCount();
runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue");
final Matcher<Long> isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion());
@ -566,7 +566,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
assertTrue(leaderId + " has been bootstrapped", leader.coordinator.isInitialConfigurationSet());
assertTrue(leaderId + " exists in its last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId));
assertThat(
leaderId + " has no NO_MASTER_BLOCK",
leaderId + " has no NO_CLUSTER_MANAGER_BLOCK",
leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(false)
);
@ -616,12 +616,12 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
);
assertTrue(nodeId + " has been bootstrapped", clusterNode.coordinator.isInitialConfigurationSet());
assertThat(
nodeId + " has correct master",
nodeId + " has correct cluster-manager",
clusterNode.getLastAppliedClusterState().nodes().getMasterNode(),
equalTo(leader.getLocalNode())
);
assertThat(
nodeId + " has no NO_MASTER_BLOCK",
nodeId + " has no NO_CLUSTER_MANAGER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(false)
);
@ -638,7 +638,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
nullValue()
);
assertThat(
nodeId + " has NO_MASTER_BLOCK",
nodeId + " has NO_CLUSTER_MANAGER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(true)
);
@ -898,7 +898,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final long persistedCurrentTerm;
if ( // node is master-ineligible either before or after the restart ...
if ( // node is cluster-manager-ineligible either before or after the restart ...
(oldState.getLastAcceptedState().nodes().getLocalNode().isMasterNode() && newLocalNode.isMasterNode()) == false
// ... and it's accepted some non-initial state so we can roll back ...
&& (oldState.getLastAcceptedState().term() > 0L || oldState.getLastAcceptedState().version() > 0L)
@ -930,7 +930,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final long newValue = randomLong();
logger.trace(
"rolling back persisted cluster state on master-ineligible node [{}]: "
"rolling back persisted cluster state on cluster-manager-ineligible node [{}]: "
+ "previously currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={} "
+ "but now currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={}",
newLocalNode,
@ -1025,7 +1025,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
private final DiscoveryNode localNode;
final MockPersistedState persistedState;
final Settings nodeSettings;
private AckedFakeThreadPoolMasterService masterService;
private AckedFakeThreadPoolClusterManagerService clusterManagerService;
private DisruptableClusterApplierService clusterApplierService;
private ClusterService clusterService;
TransportService transportService;
@ -1033,10 +1033,10 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
private NodeHealthService nodeHealthService;
List<BiConsumer<DiscoveryNode, ClusterState>> extraJoinValidators = new ArrayList<>();
ClusterNode(int nodeIndex, boolean masterEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
ClusterNode(int nodeIndex, boolean clusterManagerEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
this(
nodeIndex,
createDiscoveryNode(nodeIndex, masterEligible),
createDiscoveryNode(nodeIndex, clusterManagerEligible),
defaultPersistedStateSupplier,
nodeSettings,
nodeHealthService
@ -1105,7 +1105,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
null,
emptySet()
);
masterService = new AckedFakeThreadPoolMasterService(
clusterManagerService = new AckedFakeThreadPoolClusterManagerService(
localNode.getId(),
"test",
threadPool,
@ -1119,7 +1119,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
deterministicTaskQueue,
threadPool
);
clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService);
clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService);
clusterService.setNodeConnectionsService(
new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService)
);
@ -1134,7 +1134,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
transportService,
writableRegistry(),
allocationService,
masterService,
clusterManagerService,
this::getPersistedState,
Cluster.this::provideSeedHosts,
clusterApplierService,
@ -1144,7 +1144,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
getElectionStrategy(),
nodeHealthService
);
masterService.setClusterStatePublisher(coordinator);
clusterManagerService.setClusterStatePublisher(coordinator);
final GatewayService gatewayService = new GatewayService(
settings,
allocationService,
@ -1261,7 +1261,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
void submitSetAutoShrinkVotingConfiguration(final boolean autoShrinkVotingConfiguration) {
submitUpdateTask(
"set master nodes failure tolerance [" + autoShrinkVotingConfiguration + "]",
"set cluster-manager nodes failure tolerance [" + autoShrinkVotingConfiguration + "]",
cs -> ClusterState.builder(cs)
.metadata(
Metadata.builder(cs.metadata())
@ -1331,11 +1331,11 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
onNode(() -> {
logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source);
final long submittedTerm = coordinator.getCurrentTerm();
masterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() {
clusterManagerService.submitStateUpdateTask(source, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
assertThat(currentState.term(), greaterThanOrEqualTo(submittedTerm));
masterService.nextAckCollector = ackCollector;
clusterManagerService.nextAckCollector = ackCollector;
return clusterStateUpdate.apply(currentState);
}
@ -1347,7 +1347,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
@Override
public void onNoLongerMaster(String source) {
logger.trace("no longer master: [{}]", source);
logger.trace("no longer cluster-manager: [{}]", source);
taskListener.onNoLongerMaster(source);
}
@ -1510,11 +1510,11 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
}
static class AckedFakeThreadPoolMasterService extends FakeThreadPoolMasterService {
static class AckedFakeThreadPoolClusterManagerService extends FakeThreadPoolMasterService {
AckCollector nextAckCollector = new AckCollector();
AckedFakeThreadPoolMasterService(
AckedFakeThreadPoolClusterManagerService(
String nodeName,
String serviceName,
ThreadPool threadPool,
@ -1610,7 +1610,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
}
protected DiscoveryNode createDiscoveryNode(int nodeIndex, boolean masterEligible) {
protected DiscoveryNode createDiscoveryNode(int nodeIndex, boolean clusterManagerEligible) {
final TransportAddress address = buildNewFakeTransportAddress();
return new DiscoveryNode(
"",
@ -1620,7 +1620,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
address.getAddress(),
address,
Collections.emptyMap(),
masterEligible ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(),
clusterManagerEligible ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(),
Version.CURRENT
);
}

View File

@ -285,11 +285,11 @@ public class CoordinationStateTestCluster {
} else if (rarely() && rarely()) {
randomFrom(clusterNodes).reboot();
} else if (rarely()) {
final List<ClusterNode> masterNodes = clusterNodes.stream()
final List<ClusterNode> clusterManagerNodes = clusterNodes.stream()
.filter(cn -> cn.state.electionWon())
.collect(Collectors.toList());
if (masterNodes.isEmpty() == false) {
final ClusterNode clusterNode = randomFrom(masterNodes);
if (clusterManagerNodes.isEmpty() == false) {
final ClusterNode clusterNode = randomFrom(clusterManagerNodes);
final long term = rarely() ? randomLongBetween(0, maxTerm + 1) : clusterNode.state.getCurrentTerm();
final long version = rarely() ? randomIntBetween(0, 5) : clusterNode.state.getLastPublishedVersion() + 1;
final CoordinationMetadata.VotingConfiguration acceptedConfig = rarely()
@ -323,13 +323,15 @@ public class CoordinationStateTestCluster {
}
void invariant() {
// one master per term
// one cluster-manager per term
messages.stream()
.filter(m -> m.payload instanceof PublishRequest)
.collect(Collectors.groupingBy(m -> ((PublishRequest) m.payload).getAcceptedState().term()))
.forEach((term, publishMessages) -> {
Set<DiscoveryNode> mastersForTerm = publishMessages.stream().collect(Collectors.groupingBy(m -> m.sourceNode)).keySet();
assertThat("Multiple masters " + mastersForTerm + " for term " + term, mastersForTerm, hasSize(1));
Set<DiscoveryNode> clusterManagersForTerm = publishMessages.stream()
.collect(Collectors.groupingBy(m -> m.sourceNode))
.keySet();
assertThat("Multiple cluster-managers " + clusterManagersForTerm + " for term " + term, clusterManagersForTerm, hasSize(1));
});
// unique cluster state per (term, version) pair

View File

@ -116,7 +116,7 @@ public class FakeThreadPoolMasterService extends MasterService {
onTaskAvailableToRun.accept(new Runnable() {
@Override
public String toString() {
return "master service scheduling next task";
return "cluster-manager service scheduling next task";
}
@Override
@ -125,7 +125,7 @@ public class FakeThreadPoolMasterService extends MasterService {
assert waitForPublish == false;
assert scheduledNextTask;
final int taskIndex = randomInt(pendingTasks.size() - 1);
logger.debug("next master service task: choosing task {} of {}", taskIndex, pendingTasks.size());
logger.debug("next cluster-manager service task: choosing task {} of {}", taskIndex, pendingTasks.size());
final Runnable task = pendingTasks.remove(taskIndex);
taskInProgress = true;
scheduledNextTask = false;

View File

@ -66,21 +66,22 @@ public class MockBigArrays extends BigArrays {
private static final ConcurrentMap<Object, Object> ACQUIRED_ARRAYS = new ConcurrentHashMap<>();
public static void ensureAllArraysAreReleased() throws Exception {
final Map<Object, Object> masterCopy = new HashMap<>(ACQUIRED_ARRAYS);
if (!masterCopy.isEmpty()) {
final Map<Object, Object> clusterManagerCopy = new HashMap<>(ACQUIRED_ARRAYS);
if (!clusterManagerCopy.isEmpty()) {
// not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing arrays, lets make sure that after a reasonable timeout, all master
// and releasing arrays, let's make sure that after a reasonable timeout, all cluster-manager
// copy (snapshot) have been released
try {
assertBusy(() -> assertTrue(Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_ARRAYS.keySet())));
assertBusy(() -> assertTrue(Sets.haveEmptyIntersection(clusterManagerCopy.keySet(), ACQUIRED_ARRAYS.keySet())));
} catch (AssertionError ex) {
masterCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet());
ACQUIRED_ARRAYS.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on
if (!masterCopy.isEmpty()) {
Iterator<Object> causes = masterCopy.values().iterator();
clusterManagerCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet());
// remove all existing cluster-manager copy we will report on
ACQUIRED_ARRAYS.keySet().removeAll(clusterManagerCopy.keySet());
if (!clusterManagerCopy.isEmpty()) {
Iterator<Object> causes = clusterManagerCopy.values().iterator();
Object firstCause = causes.next();
RuntimeException exception = new RuntimeException(
masterCopy.size() + " arrays have not been released",
clusterManagerCopy.size() + " arrays have not been released",
firstCause instanceof Throwable ? (Throwable) firstCause : null
);
while (causes.hasNext()) {

View File

@ -53,19 +53,23 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = new ConcurrentHashMap<>();
public static void ensureAllPagesAreReleased() throws Exception {
final Map<Object, Throwable> masterCopy = new HashMap<>(ACQUIRED_PAGES);
if (!masterCopy.isEmpty()) {
final Map<Object, Throwable> clusterManagerCopy = new HashMap<>(ACQUIRED_PAGES);
if (!clusterManagerCopy.isEmpty()) {
// not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing pages, lets make sure that after a reasonable timeout, all master
// and releasing pages, let's make sure that after a reasonable timeout, all cluster-manager
// copy (snapshot) have been released
final boolean success = waitUntil(() -> Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet()));
final boolean success = waitUntil(() -> Sets.haveEmptyIntersection(clusterManagerCopy.keySet(), ACQUIRED_PAGES.keySet()));
if (!success) {
masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on
if (!masterCopy.isEmpty()) {
Iterator<Throwable> causes = masterCopy.values().iterator();
clusterManagerCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
// remove all existing cluster-manager copy we will report on
ACQUIRED_PAGES.keySet().removeAll(clusterManagerCopy.keySet());
if (!clusterManagerCopy.isEmpty()) {
Iterator<Throwable> causes = clusterManagerCopy.values().iterator();
Throwable firstCause = causes.next();
RuntimeException exception = new RuntimeException(masterCopy.size() + " pages have not been released", firstCause);
RuntimeException exception = new RuntimeException(
clusterManagerCopy.size() + " pages have not been released",
firstCause
);
while (causes.hasNext()) {
exception.addSuppressed(causes.next());
}

View File

@ -410,7 +410,7 @@ public final class BlobStoreTestUtil {
final ClusterService clusterService = mock(ClusterService.class);
final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class);
when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService);
// Setting local node as master so it may update the repository metadata in the cluster state
// Setting local node as cluster-manager so it may update the repository metadata in the cluster state
final DiscoveryNode localNode = new DiscoveryNode("", buildNewFakeTransportAddress(), Version.CURRENT);
final AtomicReference<ClusterState> currentState = new AtomicReference<>(
ClusterState.builder(initialState)
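The pattern the comment above describes, sketched under an assumed localNode variable (not the commit's code): pointing both localNodeId and masterNodeId at the same node makes the test node its own cluster-manager.

    // One-node DiscoveryNodes in which the local node is also the elected
    // cluster-manager, so repository metadata updates in cluster state are allowed.
    DiscoveryNodes nodes = DiscoveryNodes.builder()
        .add(localNode)
        .localNodeId(localNode.getId())
        .masterNodeId(localNode.getId())
        .build();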

View File

@ -252,30 +252,30 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestCase {
}
public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName))
final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName))
.setBlockAndFailOnWriteIndexFile();
return masterName;
return clusterManagerName;
}
public static String blockMasterOnWriteIndexFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName();
final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repositoryName))
.setBlockOnWriteIndexFile();
return masterName;
return clusterManagerName;
}
public static void blockMasterFromDeletingIndexNFile(String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName))
final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName))
.setBlockOnDeleteIndexFile();
}
public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName))
final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName))
.setBlockAndFailOnWriteSnapFiles(true);
return masterName;
return clusterManagerName;
}
public static String blockNodeWithIndex(final String repositoryName, final String indexName) {
@ -628,10 +628,10 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestCase {
}
protected void awaitMasterFinishRepoOperations() throws Exception {
logger.info("--> waiting for master to finish all repo operations on its SNAPSHOT pool");
final ThreadPool masterThreadPool = internalCluster().getMasterNodeInstance(ThreadPool.class);
logger.info("--> waiting for cluster-manager to finish all repo operations on its SNAPSHOT pool");
final ThreadPool clusterManagerThreadPool = internalCluster().getMasterNodeInstance(ThreadPool.class);
assertBusy(() -> {
for (ThreadPoolStats.Stats stat : masterThreadPool.stats()) {
for (ThreadPoolStats.Stats stat : clusterManagerThreadPool.stats()) {
if (ThreadPool.Names.SNAPSHOT.equals(stat.getName())) {
assertEquals(stat.getActive(), 0);
break;

View File

@ -143,7 +143,7 @@ public class MockRepository extends FsRepository {
*/
private volatile boolean blockOnWriteIndexFile;
/** Allows blocking on writing the snapshot file at the end of snapshot creation to simulate a died master node */
/** Allows blocking on writing the snapshot file at the end of snapshot creation to simulate a dead cluster-manager node */
private volatile boolean blockAndFailOnWriteSnapFile;
private volatile boolean blockOnWriteShardLevelMeta;
@ -191,7 +191,7 @@ public class MockRepository extends FsRepository {
}
private static RepositoryMetadata overrideSettings(RepositoryMetadata metadata, Environment environment) {
// TODO: use another method of testing not being able to read the test file written by the master...
// TODO: use another method of testing not being able to read the test file written by the cluster-manager...
// this is super duper hacky
if (metadata.settings().getAsBoolean("localize_location", false)) {
Path location = PathUtils.get(metadata.settings().get("location"));

View File

@ -248,10 +248,10 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
assert serviceHolderWithNoType == null;
// we initialize the serviceHolder and serviceHolderWithNoType just once, but need some
// calls to the randomness source during its setup. In order to not mix these calls with
// the randomness source that is later used in the test method, we use the master seed during
// the randomness source that is later used in the test method, we use the cluster-manager seed during
// this setup
long masterSeed = SeedUtils.parseSeed(RandomizedTest.getContext().getRunnerSeedAsString());
RandomizedTest.getContext().runWithPrivateRandomness(masterSeed, (Callable<Void>) () -> {
long clusterManagerSeed = SeedUtils.parseSeed(RandomizedTest.getContext().getRunnerSeedAsString());
RandomizedTest.getContext().runWithPrivateRandomness(clusterManagerSeed, (Callable<Void>) () -> {
serviceHolder = new ServiceHolder(
nodeSettings,
createTestIndexSettings(),

View File

@ -62,19 +62,19 @@ import static junit.framework.TestCase.fail;
public class ClusterServiceUtils {
public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) {
MasterService masterService = new MasterService(
Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_master_node").build(),
MasterService clusterManagerService = new MasterService(
Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_cluster_manager_node").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool
);
AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
clusterStateRef.set(event.state());
publishListener.onResponse(null);
});
masterService.setClusterStateSupplier(clusterStateRef::get);
masterService.start();
return masterService;
clusterManagerService.setClusterStateSupplier(clusterStateRef::get);
clusterManagerService.start();
return clusterManagerService;
}
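A hedged usage sketch of the helper above (assuming a live ThreadPool and an initial ClusterState): the returned service is already started, and every submitted task flows through the fake publisher that swaps the in-memory state reference.

    MasterService service = ClusterServiceUtils.createMasterService(threadPool, initialClusterState);
    service.submitStateUpdateTask("noop", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            return currentState; // even a no-op update goes through the publisher
        }

        @Override
        public void onFailure(String source, Exception e) {
            throw new AssertionError(e);
        }
    });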
public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) {

View File

@ -90,7 +90,7 @@ public final class ExternalTestCluster extends TestCluster {
private final String clusterName;
private final int numDataNodes;
private final int numMasterAndDataNodes;
private final int numClusterManagerAndDataNodes;
public ExternalTestCluster(
Path tempDir,
@ -141,19 +141,19 @@ public final class ExternalTestCluster extends TestCluster {
.get();
httpAddresses = new InetSocketAddress[nodeInfos.getNodes().size()];
int dataNodes = 0;
int masterAndDataNodes = 0;
int clusterManagerAndDataNodes = 0;
for (int i = 0; i < nodeInfos.getNodes().size(); i++) {
NodeInfo nodeInfo = nodeInfos.getNodes().get(i);
httpAddresses[i] = nodeInfo.getInfo(HttpInfo.class).address().publishAddress().address();
if (DiscoveryNode.isDataNode(nodeInfo.getSettings())) {
dataNodes++;
masterAndDataNodes++;
clusterManagerAndDataNodes++;
} else if (DiscoveryNode.isMasterNode(nodeInfo.getSettings())) {
masterAndDataNodes++;
clusterManagerAndDataNodes++;
}
}
this.numDataNodes = dataNodes;
this.numMasterAndDataNodes = masterAndDataNodes;
this.numClusterManagerAndDataNodes = clusterManagerAndDataNodes;
this.client = client;
this.node = node;
@ -191,7 +191,7 @@ public final class ExternalTestCluster extends TestCluster {
@Override
public int numDataAndMasterNodes() {
return numMasterAndDataNodes;
return numClusterManagerAndDataNodes;
}
@Override

View File

@ -166,7 +166,6 @@ import static org.opensearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE;
import static org.opensearch.test.OpenSearchTestCase.assertBusy;
import static org.opensearch.test.OpenSearchTestCase.randomFrom;
import static org.opensearch.test.NodeRoles.dataOnlyNode;
import static org.opensearch.test.NodeRoles.masterOnlyNode;
import static org.opensearch.test.NodeRoles.noRoles;
import static org.opensearch.test.NodeRoles.onlyRole;
import static org.opensearch.test.NodeRoles.removeRoles;
@ -200,11 +199,11 @@ public final class InternalTestCluster extends TestCluster {
nodeAndClient.node.settings()
);
private static final Predicate<NodeAndClient> NO_DATA_NO_MASTER_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode(
private static final Predicate<NodeAndClient> NO_DATA_NO_CLUSTER_MANAGER_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode(
nodeAndClient.node.settings()
) == false && DiscoveryNode.isDataNode(nodeAndClient.node.settings()) == false;
private static final Predicate<NodeAndClient> MASTER_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode(
private static final Predicate<NodeAndClient> CLUSTER_MANAGER_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode(
nodeAndClient.node.settings()
);
@ -238,8 +237,8 @@ public final class InternalTestCluster extends TestCluster {
* fully shared cluster to be more reproducible */
private final long[] sharedNodesSeeds;
// if set to 0, data nodes will also assume the master role
private final int numSharedDedicatedMasterNodes;
// if set to 0, data nodes will also assume the cluster-manager role
private final int numSharedDedicatedClusterManagerNodes;
private final int numSharedDataNodes;
@ -249,7 +248,7 @@ public final class InternalTestCluster extends TestCluster {
private final ExecutorService executor;
private final boolean autoManageMasterNodes;
private final boolean autoManageClusterManagerNodes;
private final Collection<Class<? extends Plugin>> mockPlugins;
@ -266,13 +265,13 @@ public final class InternalTestCluster extends TestCluster {
private ServiceDisruptionScheme activeDisruptionScheme;
private final Function<Client, Client> clientWrapper;
private int bootstrapMasterNodeIndex = -1;
private int bootstrapClusterManagerNodeIndex = -1;
public InternalTestCluster(
final long clusterSeed,
final Path baseDir,
final boolean randomlyAddDedicatedMasters,
final boolean autoManageMasterNodes,
final boolean randomlyAddDedicatedClusterManagers,
final boolean autoManageClusterManagerNodes,
final int minNumDataNodes,
final int maxNumDataNodes,
final String clusterName,
@ -285,8 +284,8 @@ public final class InternalTestCluster extends TestCluster {
this(
clusterSeed,
baseDir,
randomlyAddDedicatedMasters,
autoManageMasterNodes,
randomlyAddDedicatedClusterManagers,
autoManageClusterManagerNodes,
minNumDataNodes,
maxNumDataNodes,
clusterName,
@ -302,8 +301,8 @@ public final class InternalTestCluster extends TestCluster {
public InternalTestCluster(
final long clusterSeed,
final Path baseDir,
final boolean randomlyAddDedicatedMasters,
final boolean autoManageMasterNodes,
final boolean randomlyAddDedicatedClusterManagers,
final boolean autoManageClusterManagerNodes,
final int minNumDataNodes,
final int maxNumDataNodes,
final String clusterName,
@ -315,7 +314,7 @@ public final class InternalTestCluster extends TestCluster {
final boolean forbidPrivateIndexSettings
) {
super(clusterSeed);
this.autoManageMasterNodes = autoManageMasterNodes;
this.autoManageClusterManagerNodes = autoManageClusterManagerNodes;
this.clientWrapper = clientWrapper;
this.forbidPrivateIndexSettings = forbidPrivateIndexSettings;
this.baseDir = baseDir;
@ -330,24 +329,24 @@ public final class InternalTestCluster extends TestCluster {
Random random = new Random(clusterSeed);
boolean useDedicatedMasterNodes = randomlyAddDedicatedMasters ? random.nextBoolean() : false;
boolean useDedicatedClusterManagerNodes = randomlyAddDedicatedClusterManagers ? random.nextBoolean() : false;
this.numSharedDataNodes = RandomNumbers.randomIntBetween(random, minNumDataNodes, maxNumDataNodes);
assert this.numSharedDataNodes >= 0;
if (numSharedDataNodes == 0) {
this.numSharedCoordOnlyNodes = 0;
this.numSharedDedicatedMasterNodes = 0;
this.numSharedDedicatedClusterManagerNodes = 0;
} else {
if (useDedicatedMasterNodes) {
if (useDedicatedClusterManagerNodes) {
if (random.nextBoolean()) {
// use a dedicated master, but only low number to reduce overhead to tests
this.numSharedDedicatedMasterNodes = DEFAULT_LOW_NUM_MASTER_NODES;
// use a dedicated cluster-manager, but only a low number to reduce overhead in tests
this.numSharedDedicatedClusterManagerNodes = DEFAULT_LOW_NUM_MASTER_NODES;
} else {
this.numSharedDedicatedMasterNodes = DEFAULT_HIGH_NUM_MASTER_NODES;
this.numSharedDedicatedClusterManagerNodes = DEFAULT_HIGH_NUM_MASTER_NODES;
}
} else {
this.numSharedDedicatedMasterNodes = 0;
this.numSharedDedicatedClusterManagerNodes = 0;
}
if (numClientNodes < 0) {
this.numSharedCoordOnlyNodes = RandomNumbers.randomIntBetween(
@ -367,20 +366,20 @@ public final class InternalTestCluster extends TestCluster {
this.mockPlugins = mockPlugins;
sharedNodesSeeds = new long[numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes];
sharedNodesSeeds = new long[numSharedDedicatedClusterManagerNodes + numSharedDataNodes + numSharedCoordOnlyNodes];
for (int i = 0; i < sharedNodesSeeds.length; i++) {
sharedNodesSeeds[i] = random.nextLong();
}
logger.info(
"Setup InternalTestCluster [{}] with seed [{}] using [{}] dedicated masters, "
+ "[{}] (data) nodes and [{}] coord only nodes (min_master_nodes are [{}])",
"Setup InternalTestCluster [{}] with seed [{}] using [{}] dedicated cluster-managers, "
+ "[{}] (data) nodes and [{}] coord only nodes (min_cluster_manager_nodes are [{}])",
clusterName,
SeedUtils.formatSeed(clusterSeed),
numSharedDedicatedMasterNodes,
numSharedDedicatedClusterManagerNodes,
numSharedDataNodes,
numSharedCoordOnlyNodes,
autoManageMasterNodes ? "auto-managed" : "manual"
autoManageClusterManagerNodes ? "auto-managed" : "manual"
);
this.nodeConfigurationSource = nodeConfigurationSource;
numDataPaths = random.nextInt(5) == 0 ? 2 + random.nextInt(3) : 1;
@ -433,8 +432,8 @@ public final class InternalTestCluster extends TestCluster {
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.getKey(),
RandomNumbers.randomIntBetween(random, 1, 4)
);
// TODO: currently we only randomize "cluster.no_master_block" between "write" and "metadata_write", as "all" is fragile
// and fails shards when a master abdicates, which breaks many tests.
// TODO: currently we only randomize "cluster.no_cluster_manager_block" between "write" and "metadata_write", as "all" is fragile
// and fails shards when a cluster-manager abdicates, which breaks many tests.
builder.put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write"));
defaultSettings = builder.build();
executor = OpenSearchExecutors.newScaling(
@ -449,14 +448,15 @@ public final class InternalTestCluster extends TestCluster {
}
/**
* Sets {@link #bootstrapMasterNodeIndex} to the given value, see {@link #bootstrapMasterNodeWithSpecifiedIndex(List)}
* Sets {@link #bootstrapClusterManagerNodeIndex} to the given value, see {@link #bootstrapClusterManagerNodeWithSpecifiedIndex(List)}
* for the description of how this field is used.
* It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMasterNodes is false.
* It's only possible to change {@link #bootstrapClusterManagerNodeIndex} value if autoManageClusterManagerNodes is false.
*/
public void setBootstrapMasterNodeIndex(int bootstrapMasterNodeIndex) {
assert autoManageMasterNodes == false || bootstrapMasterNodeIndex == -1
: "bootstrapMasterNodeIndex should be -1 if autoManageMasterNodes is true, but was " + bootstrapMasterNodeIndex;
this.bootstrapMasterNodeIndex = bootstrapMasterNodeIndex;
public void setBootstrapClusterManagerNodeIndex(int bootstrapClusterManagerNodeIndex) {
assert autoManageClusterManagerNodes == false || bootstrapClusterManagerNodeIndex == -1
: "bootstrapClusterManagerNodeIndex should be -1 if autoManageClusterManagerNodes is true, but was "
+ bootstrapClusterManagerNodeIndex;
this.bootstrapClusterManagerNodeIndex = bootstrapClusterManagerNodeIndex;
}
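A minimal usage sketch (hypothetical test body; only legal when the cluster was constructed with autoManageClusterManagerNodes set to false):

    // Bootstrap once the second cluster-manager-eligible node has been started;
    // passing -1 would leave bootstrapping disabled.
    internalCluster().setBootstrapClusterManagerNodeIndex(1);
    internalCluster().startNodes(2);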
@Override
@ -648,7 +648,7 @@ public final class InternalTestCluster extends TestCluster {
int size = numDataNodes();
if (size < n) {
logger.info("increasing cluster size from {} to {}", size, n);
if (numSharedDedicatedMasterNodes > 0) {
if (numSharedDedicatedClusterManagerNodes > 0) {
startDataOnlyNodes(n - size);
} else {
startNodes(n - size);
@ -667,7 +667,7 @@ public final class InternalTestCluster extends TestCluster {
if (size <= n) {
return;
}
// prevent killing the master if possible and client nodes
// prevent killing the cluster-manager (if possible) and the client nodes
final Stream<NodeAndClient> collection = n == 0
? nodes.values().stream()
: nodes.values().stream().filter(DATA_NODE_PREDICATE.and(new NodeNamePredicate(getMasterName()).negate()));
@ -687,7 +687,12 @@ public final class InternalTestCluster extends TestCluster {
}
}
private Settings getNodeSettings(final int nodeId, final long seed, final Settings extraSettings, final int defaultMinMasterNodes) {
private Settings getNodeSettings(
final int nodeId,
final long seed,
final Settings extraSettings,
final int defaultMinClusterManagerNodes
) {
final Settings settings = getSettings(nodeId, seed, extraSettings);
final String name = buildNodeName(nodeId, settings);
@ -718,9 +723,9 @@ public final class InternalTestCluster extends TestCluster {
final String discoveryType = DISCOVERY_TYPE_SETTING.get(updatedSettings.build());
final boolean usingSingleNodeDiscovery = discoveryType.equals("single-node");
if (usingSingleNodeDiscovery == false) {
if (autoManageMasterNodes) {
if (autoManageClusterManagerNodes) {
assertThat(
"if master nodes are automatically managed then nodes must complete a join cycle when starting",
"if cluster-manager nodes are automatically managed then nodes must complete a join cycle when starting",
updatedSettings.get(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey()),
nullValue()
);
@ -817,26 +822,26 @@ public final class InternalTestCluster extends TestCluster {
}
/**
* Returns a node client to the current master node.
* Returns a node client to the current cluster-manager node.
* Note: use this with care; tests should not rely on a certain node's client.
*/
public Client masterClient() {
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()));
if (randomNodeAndClient != null) {
return randomNodeAndClient.nodeClient(); // ensure node client master is requested
return randomNodeAndClient.nodeClient(); // ensure node client cluster-manager is requested
}
throw new AssertionError("No cluster-manager client found");
}
/**
* Returns a node client to random node but not the master. This method will fail if no non-master client is available.
* Returns a node client to a random node, but not the cluster-manager. This method will fail if no non-cluster-manager client is available.
*/
public Client nonMasterClient() {
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate());
if (randomNodeAndClient != null) {
return randomNodeAndClient.nodeClient(); // ensure node client non-master is requested
return randomNodeAndClient.nodeClient(); // ensure node client non-cluster-manager is requested
}
throw new AssertionError("No non-master client found");
throw new AssertionError("No non-cluster-manager client found");
}
/**
@ -844,14 +849,14 @@ public final class InternalTestCluster extends TestCluster {
*/
public synchronized Client coordOnlyNodeClient() {
ensureOpen();
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE);
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(NO_DATA_NO_CLUSTER_MANAGER_PREDICATE);
if (randomNodeAndClient != null) {
return randomNodeAndClient.client();
}
int nodeId = nextNodeId.getAndIncrement();
Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY);
startCoordinatingOnlyNode(settings);
return getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE).client();
return getRandomNodeAndClient(NO_DATA_NO_CLUSTER_MANAGER_PREDICATE).client();
}
public synchronized String startCoordinatingOnlyNode(Settings settings) {
@ -981,7 +986,7 @@ public final class InternalTestCluster extends TestCluster {
/**
* closes the node and prepares it to be restarted
*/
Settings closeForRestart(RestartCallback callback, int minMasterNodes) throws Exception {
Settings closeForRestart(RestartCallback callback, int minClusterManagerNodes) throws Exception {
assert callback != null;
close();
removeNode(this);
@ -989,7 +994,7 @@ public final class InternalTestCluster extends TestCluster {
assert callbackSettings != null;
Settings.Builder newSettings = Settings.builder();
newSettings.put(callbackSettings);
if (minMasterNodes >= 0) {
if (minClusterManagerNodes >= 0) {
if (INITIAL_CLUSTER_MANAGER_NODES_SETTING.exists(callbackSettings) == false) {
newSettings.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey());
}
@ -1128,54 +1133,56 @@ public final class InternalTestCluster extends TestCluster {
final int prevNodeCount = nodes.size();
// start any missing node
assert newSize == numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes;
final int numberOfMasterNodes = numSharedDedicatedMasterNodes > 0 ? numSharedDedicatedMasterNodes : numSharedDataNodes;
final int defaultMinMasterNodes = (numberOfMasterNodes / 2) + 1;
assert newSize == numSharedDedicatedClusterManagerNodes + numSharedDataNodes + numSharedCoordOnlyNodes;
final int numberOfClusterManagerNodes = numSharedDedicatedClusterManagerNodes > 0
? numSharedDedicatedClusterManagerNodes
: numSharedDataNodes;
final int defaultMinClusterManagerNodes = (numberOfClusterManagerNodes / 2) + 1;
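Worked example of the quorum arithmetic above: with 3 cluster-manager-eligible nodes the minimum is 3 / 2 + 1 = 2 (a strict majority), with 5 it is 5 / 2 + 1 = 3, and integer division keeps even counts correct as well (4 / 2 + 1 = 3).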
final List<NodeAndClient> toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go
final Runnable onTransportServiceStarted = () -> rebuildUnicastHostFiles(toStartAndPublish);
final List<Settings> settings = new ArrayList<>();
for (int i = 0; i < numSharedDedicatedMasterNodes; i++) {
final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinMasterNodes);
for (int i = 0; i < numSharedDedicatedClusterManagerNodes; i++) {
final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinClusterManagerNodes);
settings.add(removeRoles(nodeSettings, Collections.singleton(DiscoveryNodeRole.DATA_ROLE)));
}
for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) {
final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinMasterNodes);
if (numSharedDedicatedMasterNodes > 0) {
for (int i = numSharedDedicatedClusterManagerNodes; i < numSharedDedicatedClusterManagerNodes + numSharedDataNodes; i++) {
final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinClusterManagerNodes);
if (numSharedDedicatedClusterManagerNodes > 0) {
settings.add(removeRoles(nodeSettings, Collections.singleton(DiscoveryNodeRole.MASTER_ROLE)));
} else {
// if we don't have dedicated master nodes, keep things default
// if we don't have dedicated cluster-manager nodes, keep things default
settings.add(nodeSettings);
}
}
for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes
+ numSharedCoordOnlyNodes; i++) {
for (int i = numSharedDedicatedClusterManagerNodes + numSharedDataNodes; i < numSharedDedicatedClusterManagerNodes
+ numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
final Builder extraSettings = Settings.builder().put(noRoles());
settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes));
settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinClusterManagerNodes));
}
int autoBootstrapMasterNodeIndex = -1;
final List<String> masterNodeNames = settings.stream()
int autoBootstrapClusterManagerNodeIndex = -1;
final List<String> clusterManagerNodeNames = settings.stream()
.filter(DiscoveryNode::isMasterNode)
.map(Node.NODE_NAME_SETTING::get)
.collect(Collectors.toList());
if (prevNodeCount == 0 && autoManageMasterNodes) {
if (numSharedDedicatedMasterNodes > 0) {
autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1);
if (prevNodeCount == 0 && autoManageClusterManagerNodes) {
if (numSharedDedicatedClusterManagerNodes > 0) {
autoBootstrapClusterManagerNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedClusterManagerNodes - 1);
} else if (numSharedDataNodes > 0) {
autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1);
autoBootstrapClusterManagerNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1);
}
}
final List<Settings> updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings);
final List<Settings> updatedSettings = bootstrapClusterManagerNodeWithSpecifiedIndex(settings);
for (int i = 0; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
for (int i = 0; i < numSharedDedicatedClusterManagerNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
Settings nodeSettings = updatedSettings.get(i);
if (i == autoBootstrapMasterNodeIndex) {
if (i == autoBootstrapClusterManagerNodeIndex) {
nodeSettings = Settings.builder()
.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), masterNodeNames)
.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), clusterManagerNodeNames)
.put(nodeSettings)
.build();
}
@ -1187,7 +1194,7 @@ public final class InternalTestCluster extends TestCluster {
nextNodeId.set(newSize);
assert size() == newSize;
if (autoManageMasterNodes && newSize > 0) {
if (autoManageClusterManagerNodes && newSize > 0) {
validateClusterFormed();
}
logger.debug(
@ -1214,11 +1221,11 @@ public final class InternalTestCluster extends TestCluster {
.map(ClusterService::state)
.collect(Collectors.toList());
final String debugString = ", expected nodes: " + expectedNodes + " and actual cluster states " + states;
// all nodes have a master
assertTrue("Missing master" + debugString, states.stream().allMatch(cs -> cs.nodes().getMasterNodeId() != null));
// all nodes have the same master (in same term)
// all nodes have a cluster-manager
assertTrue("Missing cluster-manager" + debugString, states.stream().allMatch(cs -> cs.nodes().getMasterNodeId() != null));
// all nodes have the same cluster-manager (in same term)
assertEquals(
"Not all masters in same term" + debugString,
"Not all cluster-managers in same term" + debugString,
1,
states.stream().mapToLong(ClusterState::term).distinct().count()
);
@ -1555,11 +1562,11 @@ public final class InternalTestCluster extends TestCluster {
}
/**
* Returns an Iterable to all instances for the given class &gt;T&lt; across all data and master nodes
* Returns an Iterable to all instances for the given class &lt;T&gt; across all data and cluster-manager nodes
* in the cluster.
*/
public <T> Iterable<T> getDataOrMasterNodeInstances(Class<T> clazz) {
return getInstances(clazz, DATA_NODE_PREDICATE.or(MASTER_NODE_PREDICATE));
return getInstances(clazz, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE));
}
private <T> Iterable<T> getInstances(Class<T> clazz, Predicate<NodeAndClient> predicate) {
@ -1583,7 +1590,7 @@ public final class InternalTestCluster extends TestCluster {
}
public <T> T getMasterNodeInstance(Class<T> clazz) {
return getInstance(clazz, MASTER_NODE_PREDICATE);
return getInstance(clazz, CLUSTER_MANAGER_NODE_PREDICATE);
}
private synchronized <T> T getInstance(Class<T> clazz, Predicate<NodeAndClient> predicate) {
@ -1653,13 +1660,13 @@ public final class InternalTestCluster extends TestCluster {
if (nodePrefix.equals(OpenSearchIntegTestCase.SUITE_CLUSTER_NODE_PREFIX)
&& nodeAndClient.nodeAndClientId() < sharedNodesSeeds.length
&& nodeAndClient.isMasterEligible()
&& autoManageMasterNodes
&& autoManageClusterManagerNodes
&& nodes.values()
.stream()
.filter(NodeAndClient::isMasterEligible)
.filter(n -> n.nodeAndClientId() < sharedNodesSeeds.length)
.count() == 1) {
throw new AssertionError("Tried to stop the only master eligible shared node");
throw new AssertionError("Tried to stop the only cluster-manager eligible shared node");
}
logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
stopNodesAndClient(nodeAndClient);
@ -1667,36 +1674,36 @@ public final class InternalTestCluster extends TestCluster {
}
/**
* Stops the current master node forcefully
* Stops the current cluster-manager node forcefully
*/
public synchronized void stopCurrentMasterNode() throws IOException {
ensureOpen();
assert size() > 0;
String masterNodeName = getMasterName();
final NodeAndClient masterNode = nodes.get(masterNodeName);
assert masterNode != null;
logger.info("Closing master node [{}] ", masterNodeName);
stopNodesAndClient(masterNode);
String clusterManagerNodeName = getMasterName();
final NodeAndClient clusterManagerNode = nodes.get(clusterManagerNodeName);
assert clusterManagerNode != null;
logger.info("Closing cluster-manager node [{}] ", clusterManagerNodeName);
stopNodesAndClient(clusterManagerNode);
}
/**
* Stops any of the current nodes but not the master node.
* Stops any of the current nodes but not the cluster-manager node.
*/
public synchronized void stopRandomNonMasterNode() throws IOException {
NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate());
if (nodeAndClient != null) {
logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName());
logger.info("Closing random non cluster-manager node [{}] current cluster-manager [{}] ", nodeAndClient.name, getMasterName());
stopNodesAndClient(nodeAndClient);
}
}
private synchronized void startAndPublishNodesAndClients(List<NodeAndClient> nodeAndClients) {
if (nodeAndClients.size() > 0) {
final int newMasters = (int) nodeAndClients.stream()
final int newClusterManagers = (int) nodeAndClients.stream()
.filter(NodeAndClient::isMasterEligible)
.filter(nac -> nodes.containsKey(nac.name) == false) // filter out old masters
.filter(nac -> nodes.containsKey(nac.name) == false) // filter out old cluster-managers
.count();
final int currentMasters = getMasterNodesCount();
final int currentClusterManagers = getClusterManagerNodesCount();
rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start
List<Future<?>> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList());
@ -1713,11 +1720,11 @@ public final class InternalTestCluster extends TestCluster {
}
nodeAndClients.forEach(this::publishNode);
if (autoManageMasterNodes
&& currentMasters > 0
&& newMasters > 0
&& getMinMasterNodes(currentMasters + newMasters) > currentMasters) {
// update once masters have joined
if (autoManageClusterManagerNodes
&& currentClusterManagers > 0
&& newClusterManagers > 0
&& getMinClusterManagerNodes(currentClusterManagers + newClusterManagers) > currentClusterManagers) {
// update once cluster-managers have joined
validateClusterFormed();
}
}
@ -1758,7 +1765,7 @@ public final class InternalTestCluster extends TestCluster {
}
private synchronized void stopNodesAndClients(Collection<NodeAndClient> nodeAndClients) throws IOException {
final Set<String> excludedNodeIds = excludeMasters(nodeAndClients);
final Set<String> excludedNodeIds = excludeClusterManagers(nodeAndClients);
for (NodeAndClient nodeAndClient : nodeAndClients) {
removeDisruptionSchemeFromNode(nodeAndClient);
@ -1834,11 +1841,11 @@ public final class InternalTestCluster extends TestCluster {
activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
}
Set<String> excludedNodeIds = excludeMasters(Collections.singleton(nodeAndClient));
Set<String> excludedNodeIds = excludeClusterManagers(Collections.singleton(nodeAndClient));
final Settings newSettings = nodeAndClient.closeForRestart(
callback,
autoManageMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1
autoManageClusterManagerNodes ? getMinClusterManagerNodes(getClusterManagerNodesCount()) : -1
);
removeExclusions(excludedNodeIds);
@ -1862,22 +1869,23 @@ public final class InternalTestCluster extends TestCluster {
return previous;
}
private Set<String> excludeMasters(Collection<NodeAndClient> nodeAndClients) {
private Set<String> excludeClusterManagers(Collection<NodeAndClient> nodeAndClients) {
assert Thread.holdsLock(this);
final Set<String> excludedNodeNames = new HashSet<>();
if (autoManageMasterNodes && nodeAndClients.size() > 0) {
if (autoManageClusterManagerNodes && nodeAndClients.size() > 0) {
final long currentMasters = nodes.values().stream().filter(NodeAndClient::isMasterEligible).count();
final long stoppingMasters = nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).count();
final long currentClusterManagers = nodes.values().stream().filter(NodeAndClient::isMasterEligible).count();
final long stoppingClusterManagers = nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).count();
assert stoppingMasters <= currentMasters : currentMasters + " < " + stoppingMasters;
if (stoppingMasters != currentMasters && stoppingMasters > 0) {
// If stopping few enough master-nodes that there's still a majority left, there is no need to withdraw their votes first.
assert stoppingClusterManagers <= currentClusterManagers : currentClusterManagers + " < " + stoppingClusterManagers;
if (stoppingClusterManagers != currentClusterManagers && stoppingClusterManagers > 0) {
// If stopping few enough cluster-manager-nodes that there's still a majority left, there is no need to withdraw their votes
// first.
// However, we do not yet have a way to be sure there's a majority left, because the voting configuration may not yet have
// been updated when the previous nodes shut down, so we must always explicitly withdraw votes.
// TODO add cluster health API to check that voting configuration is optimal so this isn't always needed
nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).map(NodeAndClient::getName).forEach(excludedNodeNames::add);
assert excludedNodeNames.size() == stoppingMasters;
assert excludedNodeNames.size() == stoppingClusterManagers;
logger.info("adding voting config exclusions {} prior to restart/shutdown", excludedNodeNames);
try {
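Because there is no health check for an optimal voting configuration yet, votes are always withdrawn explicitly. A minimal sketch of the same round-trip as it could appear in a test, assuming the standard voting-config exclusions transport actions (the node name is illustrative):

// Withdraw the stopping node's vote, stop it, then clear the exclusion
// so later tests start from a clean voting configuration.
client().execute(
    AddVotingConfigExclusionsAction.INSTANCE,
    new AddVotingConfigExclusionsRequest("node_t2")
).actionGet();
internalCluster().stopRandomNode(InternalTestCluster.nameFilter("node_t2"));
client().execute(
    ClearVotingConfigExclusionsAction.INSTANCE,
    new ClearVotingConfigExclusionsRequest()
).actionGet();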
@@ -1912,7 +1920,7 @@ public final class InternalTestCluster extends TestCluster {
public synchronized void fullRestart(RestartCallback callback) throws Exception {
int numNodesRestarted = 0;
final Settings[] newNodeSettings = new Settings[nextNodeId.get()];
final int minMasterNodes = autoManageMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1;
final int minClusterManagerNodes = autoManageClusterManagerNodes ? getMinClusterManagerNodes(getClusterManagerNodesCount()) : -1;
final List<NodeAndClient> toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go
for (NodeAndClient nodeAndClient : nodes.values()) {
callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
@@ -1920,7 +1928,7 @@ public final class InternalTestCluster extends TestCluster {
if (activeDisruptionScheme != null) {
activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
}
final Settings newSettings = nodeAndClient.closeForRestart(callback, minMasterNodes);
final Settings newSettings = nodeAndClient.closeForRestart(callback, minClusterManagerNodes);
newNodeSettings[nodeAndClient.nodeAndClientId()] = newSettings;
toStartAndPublish.add(nodeAndClient);
}
@@ -1943,14 +1951,14 @@ public final class InternalTestCluster extends TestCluster {
}
/**
* Returns the name of the current master node in the cluster.
* Returns the name of the current cluster-manager node in the cluster.
*/
public String getMasterName() {
return getMasterName(null);
}
/**
* Returns the name of the current master node in the cluster and executes the request via the node specified
* Returns the name of the current cluster-manager node in the cluster and executes the request via the node specified
* in the viaNode parameter. If viaNode isn't specified, a random node will be picked to send the request to.
*/
public String getMasterName(@Nullable String viaNode) {
@@ -1959,7 +1967,7 @@ public final class InternalTestCluster extends TestCluster {
return client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName();
} catch (Exception e) {
logger.warn("Can't fetch cluster state", e);
throw new RuntimeException("Can't get master node " + e.getMessage(), e);
throw new RuntimeException("Can't get cluster-manager node " + e.getMessage(), e);
}
}
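A typical caller (hypothetical snippet, not part of this change) looks up the elected cluster-manager by name and then acts on exactly that node:

// Restart whichever node is currently the elected cluster-manager.
String clusterManagerName = internalCluster().getMasterName();
internalCluster().restartNode(clusterManagerName, new InternalTestCluster.RestartCallback());
ensureStableCluster(internalCluster().size());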
@@ -1999,14 +2007,14 @@ public final class InternalTestCluster extends TestCluster {
}
/**
* Performs cluster bootstrap when node with index {@link #bootstrapMasterNodeIndex} is started
* Performs cluster bootstrap when node with index {@link #bootstrapClusterManagerNodeIndex} is started
* with the names of all existing and new cluster-manager-eligible nodes.
* Indexing starts from 0.
* If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing.
* If {@link #bootstrapClusterManagerNodeIndex} is -1 (default), this method does nothing.
*/
private List<Settings> bootstrapMasterNodeWithSpecifiedIndex(List<Settings> allNodesSettings) {
private List<Settings> bootstrapClusterManagerNodeWithSpecifiedIndex(List<Settings> allNodesSettings) {
assert Thread.holdsLock(this);
if (bootstrapMasterNodeIndex == -1) { // fast-path
if (bootstrapClusterManagerNodeIndex == -1) { // fast-path
return allNodesSettings;
}
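Tests drive this path by setting the index before any node comes up, mirroring the usage elsewhere in this change; a minimal sketch:

// Bootstrap on the last of three nodes to start, then wait for the cluster to form.
internalCluster().setBootstrapClusterManagerNodeIndex(2);
internalCluster().startNodes(3);
ensureStableCluster(3);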
@@ -2018,7 +2026,7 @@ public final class InternalTestCluster extends TestCluster {
newSettings.add(settings);
} else {
currentNodeId++;
if (currentNodeId != bootstrapMasterNodeIndex) {
if (currentNodeId != bootstrapClusterManagerNodeIndex) {
newSettings.add(settings);
} else {
List<String> nodeNames = new ArrayList<>();
@@ -2042,7 +2050,7 @@ public final class InternalTestCluster extends TestCluster {
.build()
);
setBootstrapMasterNodeIndex(-1);
setBootstrapClusterManagerNodeIndex(-1);
}
}
}
@@ -2089,46 +2097,46 @@ public final class InternalTestCluster extends TestCluster {
* Starts multiple nodes with the given settings and returns their names
*/
public synchronized List<String> startNodes(Settings... extraSettings) {
final int newMasterCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isMasterNode).count());
final int defaultMinMasterNodes;
if (autoManageMasterNodes) {
defaultMinMasterNodes = getMinMasterNodes(getMasterNodesCount() + newMasterCount);
final int newClusterManagerCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isMasterNode).count());
final int defaultMinClusterManagerNodes;
if (autoManageClusterManagerNodes) {
defaultMinClusterManagerNodes = getMinClusterManagerNodes(getClusterManagerNodesCount() + newClusterManagerCount);
} else {
defaultMinMasterNodes = -1;
defaultMinClusterManagerNodes = -1;
}
final List<NodeAndClient> nodes = new ArrayList<>();
final int prevMasterCount = getMasterNodesCount();
int autoBootstrapMasterNodeIndex = autoManageMasterNodes
&& prevMasterCount == 0
&& newMasterCount > 0
final int prevClusterManagerCount = getClusterManagerNodesCount();
int autoBootstrapClusterManagerNodeIndex = autoManageClusterManagerNodes
&& prevClusterManagerCount == 0
&& newClusterManagerCount > 0
&& Arrays.stream(extraSettings)
.allMatch(s -> DiscoveryNode.isMasterNode(s) == false || ZEN2_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(s)))
? RandomNumbers.randomIntBetween(random, 0, newMasterCount - 1)
? RandomNumbers.randomIntBetween(random, 0, newClusterManagerCount - 1)
: -1;
final int numOfNodes = extraSettings.length;
final int firstNodeId = nextNodeId.getAndIncrement();
final List<Settings> settings = new ArrayList<>();
for (int i = 0; i < numOfNodes; i++) {
settings.add(getNodeSettings(firstNodeId + i, random.nextLong(), extraSettings[i], defaultMinMasterNodes));
settings.add(getNodeSettings(firstNodeId + i, random.nextLong(), extraSettings[i], defaultMinClusterManagerNodes));
}
nextNodeId.set(firstNodeId + numOfNodes);
final List<String> initialMasterNodes = settings.stream()
final List<String> initialClusterManagerNodes = settings.stream()
.filter(DiscoveryNode::isMasterNode)
.map(Node.NODE_NAME_SETTING::get)
.collect(Collectors.toList());
final List<Settings> updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings);
final List<Settings> updatedSettings = bootstrapClusterManagerNodeWithSpecifiedIndex(settings);
for (int i = 0; i < numOfNodes; i++) {
final Settings nodeSettings = updatedSettings.get(i);
final Builder builder = Settings.builder();
if (DiscoveryNode.isMasterNode(nodeSettings)) {
if (autoBootstrapMasterNodeIndex == 0) {
builder.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), initialMasterNodes);
if (autoBootstrapClusterManagerNodeIndex == 0) {
builder.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), initialClusterManagerNodes);
}
autoBootstrapMasterNodeIndex -= 1;
autoBootstrapClusterManagerNodeIndex -= 1;
}
final NodeAndClient nodeAndClient = buildNode(
@@ -2140,7 +2148,7 @@ public final class InternalTestCluster extends TestCluster {
nodes.add(nodeAndClient);
}
startAndPublishNodesAndClients(nodes);
if (autoManageMasterNodes) {
if (autoManageClusterManagerNodes) {
validateClusterFormed();
}
return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList());
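Mixed topologies are expressed through per-node settings; a minimal sketch using the NodeRoles helpers (clusterManagerOnlyNode is the helper renamed below; dataOnlyNode is assumed to be its data-role counterpart):

// One dedicated cluster-manager-eligible node plus two data-only nodes.
List<String> names = internalCluster().startNodes(
    clusterManagerOnlyNode(),
    dataOnlyNode(),
    dataOnlyNode()
);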
@@ -2162,21 +2170,21 @@ public final class InternalTestCluster extends TestCluster {
return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build());
}
/** calculates a min master nodes value based on the given number of master nodes */
private static int getMinMasterNodes(int eligibleMasterNodes) {
return eligibleMasterNodes / 2 + 1;
/** calculates a min cluster-manager nodes value based on the given number of cluster-manager nodes */
private static int getMinClusterManagerNodes(int eligibleClusterManagerNodes) {
return eligibleClusterManagerNodes / 2 + 1;
}
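This is the usual strict-majority quorum; with integer division it yields getMinClusterManagerNodes(1) == 1, getMinClusterManagerNodes(3) == 2, and getMinClusterManagerNodes(5) == 3.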
private int getMasterNodesCount() {
private int getClusterManagerNodesCount() {
return (int) nodes.values().stream().filter(n -> DiscoveryNode.isMasterNode(n.node().settings())).count();
}
public String startMasterOnlyNode() {
return startMasterOnlyNode(Settings.EMPTY);
public String startClusterManagerOnlyNode() {
return startClusterManagerOnlyNode(Settings.EMPTY);
}
public String startMasterOnlyNode(Settings settings) {
Settings settings1 = Settings.builder().put(settings).put(masterOnlyNode(settings)).build();
public String startClusterManagerOnlyNode(Settings settings) {
Settings settings1 = Settings.builder().put(settings).put(NodeRoles.clusterManagerOnlyNode(settings)).build();
return startNode(settings1);
}
@@ -2208,7 +2216,7 @@ public final class InternalTestCluster extends TestCluster {
@Override
public int numDataAndMasterNodes() {
return filterNodes(nodes, DATA_NODE_PREDICATE.or(MASTER_NODE_PREDICATE)).size();
return filterNodes(nodes, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE)).size();
}
public int numMasterNodes() {


@@ -176,11 +176,11 @@ public class NodeRoles {
return addRoles(settings, Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE));
}
public static Settings masterOnlyNode() {
return masterOnlyNode(Settings.EMPTY);
public static Settings clusterManagerOnlyNode() {
return clusterManagerOnlyNode(Settings.EMPTY);
}
public static Settings masterOnlyNode(final Settings settings) {
public static Settings clusterManagerOnlyNode(final Settings settings) {
return onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
}
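As a usage sketch (hypothetical test code), the builder composes with extra caller-supplied settings before a node is started:

// Start a node restricted to the cluster-manager role; the attribute is illustrative.
Settings settings = Settings.builder()
    .put(clusterManagerOnlyNode())
    .put("node.attr.zone", "a")
    .build();
String nodeName = internalCluster().startNode(settings);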


@@ -707,17 +707,19 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
}
/**
* Creates a disruption that isolates the current master node from all other nodes in the cluster.
* Creates a disruption that isolates the current cluster-manager node from all other nodes in the cluster.
*
* @param disruptionType type of disruption to create
* @return disruption
*/
protected static NetworkDisruption isolateMasterDisruption(NetworkDisruption.NetworkLinkDisruptionType disruptionType) {
final String masterNode = internalCluster().getMasterName();
final String clusterManagerNode = internalCluster().getMasterName();
return new NetworkDisruption(
new NetworkDisruption.TwoPartitions(
Collections.singleton(masterNode),
Arrays.stream(internalCluster().getNodeNames()).filter(name -> name.equals(masterNode) == false).collect(Collectors.toSet())
Collections.singleton(clusterManagerNode),
Arrays.stream(internalCluster().getNodeNames())
.filter(name -> name.equals(clusterManagerNode) == false)
.collect(Collectors.toSet())
),
disruptionType
);
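A hypothetical caller wires the returned disruption into the cluster and triggers it, here assuming a full disconnect as the link type:

// Isolate the elected cluster-manager, let the majority elect a new one, then heal.
NetworkDisruption disruption = isolateMasterDisruption(new NetworkDisruption.NetworkDisconnect());
internalCluster().setDisruptionScheme(disruption);
disruption.startDisrupting();
// ... assertions against the healthy majority side ...
disruption.stopDisrupting();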
@@ -949,12 +951,13 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
.waitForNoRelocatingShards(true)
.waitForNoInitializingShards(waitForNoInitializingShards)
// We currently often use ensureGreen or ensureYellow to check whether the cluster is back in a good state after shutting down
// a node. If the node that is stopped is the master node, another node will become master and publish a cluster state where it
// is master but where the node that was stopped hasn't been removed yet from the cluster state. It will only subsequently
// publish a second state where the old master is removed. If the ensureGreen/ensureYellow is timed just right, it will get to
// execute before the second cluster state update removes the old master and the condition ensureGreen / ensureYellow will
// trivially hold if it held before the node was shut down. The following "waitForNodes" condition ensures that the node has
// been removed by the master so that the health check applies to the set of nodes we expect to be part of the cluster.
// a node. If the node that is stopped is the cluster-manager node, another node will become cluster-manager and publish a
// cluster state where it is cluster-manager but where the node that was stopped hasn't been removed yet from the cluster state.
// It will only subsequently publish a second state where the old cluster-manager is removed.
// If the ensureGreen/ensureYellow is timed just right, it will get to execute before the second cluster state update removes
// the old cluster-manager and the condition ensureGreen / ensureYellow will trivially hold if it held before the node was
// shut down. The following "waitForNodes" condition ensures that the node has been removed by the cluster-manager
// so that the health check applies to the set of nodes we expect to be part of the cluster.
.waitForNodes(Integer.toString(cluster().size()));
ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet();
@@ -1085,19 +1088,19 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
}
/**
* Verifies that all nodes that have the same version of the cluster state as master have same cluster state
* Verifies that all nodes that have the same version of the cluster state as cluster-manager have same cluster state
*/
protected void ensureClusterStateConsistency() throws IOException {
if (cluster() != null && cluster().size() > 0) {
final NamedWriteableRegistry namedWriteableRegistry = cluster().getNamedWriteableRegistry();
final Client masterClient = client();
ClusterState masterClusterState = masterClient.admin().cluster().prepareState().all().get().getState();
byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState);
final Client clusterManagerClient = client();
ClusterState clusterManagerClusterState = clusterManagerClient.admin().cluster().prepareState().all().get().getState();
byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(clusterManagerClusterState);
// remove local node reference
masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry);
Map<String, Object> masterStateMap = convertToMap(masterClusterState);
int masterClusterStateSize = masterClusterState.toString().length();
String masterId = masterClusterState.nodes().getMasterNodeId();
clusterManagerClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry);
Map<String, Object> clusterManagerStateMap = convertToMap(clusterManagerClusterState);
int clusterManagerClusterStateSize = clusterManagerClusterState.toString().length();
String clusterManagerId = clusterManagerClusterState.nodes().getMasterNodeId();
for (Client client : cluster().getClients()) {
ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState();
byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState);
@@ -1105,27 +1108,32 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null, namedWriteableRegistry);
final Map<String, Object> localStateMap = convertToMap(localClusterState);
final int localClusterStateSize = localClusterState.toString().length();
// Check that the non-master node has the same version of the cluster state as the master and
// that the master node matches the master (otherwise there is no requirement for the cluster state to match)
if (masterClusterState.version() == localClusterState.version()
&& masterId.equals(localClusterState.nodes().getMasterNodeId())) {
// Check that the non-cluster-manager node has the same version of the cluster state as the cluster-manager and
// that it agrees on which node is the cluster-manager (otherwise there is no requirement for the cluster state to
// match)
if (clusterManagerClusterState.version() == localClusterState.version()
&& clusterManagerId.equals(localClusterState.nodes().getMasterNodeId())) {
try {
assertEquals("cluster state UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID());
assertEquals(
"cluster state UUID does not match",
clusterManagerClusterState.stateUUID(),
localClusterState.stateUUID()
);
// We cannot compare serialization bytes since serialization order of maps is not guaranteed
// We also cannot compare byte array size because CompressedXContent's DeflateCompressor uses
// a synced flush that can affect the size of the compressed byte array
// (see: DeflateCompressedXContentTests#testDifferentCompressedRepresentation for an example)
// instead we compare the string length of cluster state - they should be the same
assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize);
assertEquals("cluster state size does not match", clusterManagerClusterStateSize, localClusterStateSize);
// Compare JSON serialization
assertNull(
"cluster state JSON serialization does not match",
differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)
differenceBetweenMapsIgnoringArrayOrder(clusterManagerStateMap, localStateMap)
);
} catch (final AssertionError error) {
logger.error(
"Cluster state from master:\n{}\nLocal cluster state:\n{}",
masterClusterState.toString(),
"Cluster state from cluster-manager:\n{}\nLocal cluster state:\n{}",
clusterManagerClusterState.toString(),
localClusterState.toString()
);
throw error;
@@ -1138,8 +1146,8 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
protected void ensureClusterStateCanBeReadByNodeTool() throws IOException {
if (cluster() != null && cluster().size() > 0) {
final Client masterClient = client();
Metadata metadata = masterClient.admin().cluster().prepareState().all().get().getState().metadata();
final Client clusterManagerClient = client();
Metadata metadata = clusterManagerClient.admin().cluster().prepareState().all().get().getState().metadata();
final Map<String, String> serializationParams = new HashMap<>(2);
serializationParams.put("binary", "true");
serializationParams.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY);
@@ -1320,7 +1328,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
/**
* Ensures that all nodes in the cluster are connected to each other.
*
* Some network disruptions may leave nodes that are not the master disconnected from each other.
* Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other.
* {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's
* handy to be able to ensure this happens faster
*/
@@ -1738,9 +1746,9 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
int maxNumDataNodes() default -1;
/**
* Indicates whether the cluster can have dedicated master nodes. If {@code false} means data nodes will serve as master nodes
* and there will be no dedicated master (and data) nodes. Default is {@code false} which means
* dedicated master nodes will be randomly used.
* Indicates whether the cluster can have dedicated cluster-manager nodes. If {@code false}, data nodes will serve as cluster-manager nodes
* and there will be no dedicated cluster-manager (and data) nodes. Default is {@code true}, which means
* dedicated cluster-manager nodes will be randomly used.
*/
boolean supportsDedicatedMasters() default true;
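For example, a test class can pin the shape of the randomized cluster through the annotation; an illustrative declaration:

// No dedicated cluster-manager nodes: the two data nodes are cluster-manager-eligible themselves.
@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 2)
public class ExampleShapeIT extends OpenSearchIntegTestCase {}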
@@ -1829,12 +1837,12 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
return annotation == null ? Scope.SUITE : annotation.scope();
}
private boolean getSupportsDedicatedMasters() {
private boolean getSupportsDedicatedClusterManagers() {
ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
return annotation == null ? true : annotation.supportsDedicatedMasters();
}
private boolean getAutoManageMasterNodes() {
private boolean getAutoManageClusterManagerNodes() {
ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
return annotation == null ? true : annotation.autoManageMasterNodes();
}
@@ -1959,7 +1967,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
throw new OpenSearchException("Scope not supported: " + scope);
}
boolean supportsDedicatedMasters = getSupportsDedicatedMasters();
boolean supportsDedicatedClusterManagers = getSupportsDedicatedClusterManagers();
int numDataNodes = getNumDataNodes();
int minNumDataNodes;
int maxNumDataNodes;
@@ -1983,8 +1991,8 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
return new InternalTestCluster(
seed,
createTempDir(),
supportsDedicatedMasters,
getAutoManageMasterNodes(),
supportsDedicatedClusterManagers,
getAutoManageClusterManagerNodes(),
minNumDataNodes,
maxNumDataNodes,
InternalTestCluster.clusterName(scope.name(), seed) + "-cluster",
@@ -2227,7 +2235,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
// need to check that there are no more in-flight search contexts before
// we remove indices
if (isInternalCluster()) {
internalCluster().setBootstrapMasterNodeIndex(-1);
internalCluster().setBootstrapClusterManagerNodeIndex(-1);
}
super.ensureAllSearchContextsReleased();
if (runTestScopeLifecycle()) {


@@ -97,7 +97,7 @@ public abstract class OpenSearchSingleNodeTestCase extends OpenSearchTestCase {
assert NODE == null;
NODE = RandomizedContext.current().runWithPrivateRandomness(seed, this::newNode);
// we must wait for the node to actually be up and running. otherwise the node might have started,
// elected itself master but might not yet have removed the
// elected itself cluster-manager but might not yet have removed the
// SERVICE_UNAVAILABLE/1/state not recovered / initialized block
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().get();
assertFalse(clusterHealthResponse.isTimedOut());

View File

@@ -125,7 +125,7 @@ public abstract class TestCluster implements Closeable {
public abstract int numDataNodes();
/**
* Returns the number of data and master eligible nodes in the cluster.
* Returns the number of data and cluster-manager eligible nodes in the cluster.
*/
public abstract int numDataAndMasterNodes();


@@ -65,7 +65,7 @@ public class VersionUtils {
Map<Integer, List<Version>> majorVersions = Version.getDeclaredVersions(versionClass)
.stream()
.collect(Collectors.groupingBy(v -> (int) v.major));
// this breaks b/c 5.x is still in version list but master doesn't care about it!
// this breaks b/c 5.x is still in version list but the main branch doesn't care about it!
// assert majorVersions.size() == 2;
List<List<Version>> oldVersions = new ArrayList<>(0);
List<List<Version>> previousMajor = new ArrayList<>(0);
@@ -85,7 +85,7 @@ public class VersionUtils {
List<Version> unreleasedVersions = new ArrayList<>();
final List<List<Version>> stableVersions;
if (currentMajor.size() == 1) {
// on master branch
// on main branch
stableVersions = previousMajor;
// remove current
moveLastToUnreleased(currentMajor, unreleasedVersions);


@@ -62,7 +62,7 @@ public class BlockMasterServiceOnMaster extends SingleNodeDisruption {
if (clusterService == null) {
return;
}
logger.info("blocking master service on node [{}]", disruptionNodeCopy);
logger.info("blocking cluster-manager service on node [{}]", disruptionNodeCopy);
boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
assert success : "startDisrupting called without waiting on stopDisrupting to complete";
final CountDownLatch started = new CountDownLatch(1);


@@ -61,7 +61,7 @@ public class BusyMasterServiceDisruption extends SingleNodeDisruption {
if (clusterService == null) {
return;
}
logger.info("making master service busy on node [{}] at priority [{}]", disruptionNodeCopy, priority);
logger.info("making cluster-manager service busy on node [{}] at priority [{}]", disruptionNodeCopy, priority);
active.set(true);
submitTask(clusterService);
}


@@ -109,7 +109,7 @@ public class NetworkDisruption implements ServiceDisruptionScheme {
/**
* Ensures that all nodes in the cluster are connected to each other.
*
* Some network disruptions may leave nodes that are not the master disconnected from each other.
* Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other.
* {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's
* handy to be able to ensure this happens faster
*/


@@ -62,10 +62,10 @@ public final class ClientYamlDocsTestClient extends ClientYamlTestClient {
final RestClient restClient,
final List<HttpHost> hosts,
final Version esVersion,
final Version masterVersion,
final Version clusterManagerVersion,
final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes
) {
super(restSpec, restClient, hosts, esVersion, masterVersion, clientBuilderWithSniffedNodes);
super(restSpec, restClient, hosts, esVersion, clusterManagerVersion, clientBuilderWithSniffedNodes);
}
@Override


@@ -78,7 +78,7 @@ public class ClientYamlTestClient implements Closeable {
private final ClientYamlSuiteRestSpec restSpec;
private final Map<NodeSelector, RestClient> restClients = new HashMap<>();
private final Version esVersion;
private final Version masterVersion;
private final Version clusterManagerVersion;
private final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes;
ClientYamlTestClient(
@@ -86,14 +86,14 @@ public class ClientYamlTestClient implements Closeable {
final RestClient restClient,
final List<HttpHost> hosts,
final Version esVersion,
final Version masterVersion,
final Version clusterManagerVersion,
final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes
) {
assert hosts.size() > 0;
this.restSpec = restSpec;
this.restClients.put(NodeSelector.ANY, restClient);
this.esVersion = esVersion;
this.masterVersion = masterVersion;
this.clusterManagerVersion = clusterManagerVersion;
this.clientBuilderWithSniffedNodes = clientBuilderWithSniffedNodes;
}
@@ -101,8 +101,8 @@ public class ClientYamlTestClient implements Closeable {
return esVersion;
}
public Version getMasterVersion() {
return masterVersion;
public Version getClusterManagerVersion() {
return clusterManagerVersion;
}
/**


@@ -228,7 +228,7 @@ public class ClientYamlTestExecutionContext {
}
public Version masterVersion() {
return clientYamlTestClient.getMasterVersion();
return clientYamlTestClient.getClusterManagerVersion();
}
}


@@ -144,14 +144,14 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
final List<HttpHost> hosts = getClusterHosts();
Tuple<Version, Version> versionVersionTuple = readVersionsFromCatNodes(adminClient());
final Version minVersion = versionVersionTuple.v1();
final Version masterVersion = versionVersionTuple.v2();
final Version clusterManagerVersion = versionVersionTuple.v2();
logger.info(
"initializing client, minimum OpenSearch version [{}], master version, [{}], hosts {}",
"initializing client, minimum OpenSearch version [{}], cluster-manager version, [{}], hosts {}",
minVersion,
masterVersion,
clusterManagerVersion,
hosts
);
clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, minVersion, masterVersion);
clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, minVersion, clusterManagerVersion);
restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, randomizeContentType());
adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, false);
final String[] denylist = resolvePathsProperty(REST_TESTS_DENYLIST, null);
@@ -179,9 +179,16 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
final RestClient restClient,
final List<HttpHost> hosts,
final Version esVersion,
final Version masterVersion
final Version clusterManagerVersion
) {
return new ClientYamlTestClient(restSpec, restClient, hosts, esVersion, masterVersion, this::getClientBuilderWithSniffedHosts);
return new ClientYamlTestClient(
restSpec,
restClient,
hosts,
esVersion,
clusterManagerVersion,
this::getClientBuilderWithSniffedHosts
);
}
@AfterClass
@@ -330,28 +337,28 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
* Detect the minimal node version and the cluster-manager node version of the cluster using the REST client.
*
* @param restClient REST client used to discover cluster nodes
* @return {@link Tuple} of [minimal node version, master node version]
* @return {@link Tuple} of [minimal node version, cluster-manager node version]
* @throws IOException When _cat API output parsing fails
*/
private Tuple<Version, Version> readVersionsFromCatNodes(RestClient restClient) throws IOException {
// we simply go to the _cat/nodes API and parse all versions in the cluster
final Request request = new Request("GET", "/_cat/nodes");
request.addParameter("h", "version,master");
request.setOptions(getCatNodesVersionMasterRequestOptions());
request.setOptions(getCatNodesVersionClusterManagerRequestOptions());
Response response = restClient.performRequest(request);
ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response);
String nodesCatResponse = restTestResponse.getBodyAsString();
String[] split = nodesCatResponse.split("\n");
Version version = null;
Version masterVersion = null;
Version clusterManagerVersion = null;
for (String perNode : split) {
final String[] versionAndMaster = perNode.split("\\s+");
assert versionAndMaster.length == 2 : "invalid line: " + perNode + " length: " + versionAndMaster.length;
final Version currentVersion = Version.fromString(versionAndMaster[0]);
final boolean master = versionAndMaster[1].trim().equals("*");
if (master) {
assert masterVersion == null;
masterVersion = currentVersion;
final String[] versionAndClusterManager = perNode.split("\\s+");
assert versionAndClusterManager.length == 2 : "invalid line: " + perNode + " length: " + versionAndClusterManager.length;
final Version currentVersion = Version.fromString(versionAndClusterManager[0]);
final boolean clusterManager = versionAndClusterManager[1].trim().equals("*");
if (clusterManager) {
assert clusterManagerVersion == null;
clusterManagerVersion = currentVersion;
}
if (version == null) {
version = currentVersion;
@@ -359,10 +366,10 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
version = currentVersion;
}
}
return new Tuple<>(version, masterVersion);
return new Tuple<>(version, clusterManagerVersion);
}
protected RequestOptions getCatNodesVersionMasterRequestOptions() {
protected RequestOptions getCatNodesVersionClusterManagerRequestOptions() {
return RequestOptions.DEFAULT;
}
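For reference, the parser above expects two whitespace-separated columns per node; a hypothetical GET /_cat/nodes?h=version,master response would look like:

2.0.0 -
2.0.0 *
2.0.0 -

where the asterisk marks the elected cluster-manager and supplies the second element of the returned Tuple.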


@@ -353,7 +353,7 @@ public class DoSection implements ExecutableSection {
/**
* Check that the response contains only the warning headers that we expect.
*/
void checkWarningHeaders(final List<String> warningHeaders, final Version masterVersion) {
void checkWarningHeaders(final List<String> warningHeaders, final Version clusterManagerVersion) {
final List<String> unexpected = new ArrayList<>();
final List<String> unmatched = new ArrayList<>();
final List<String> missing = new ArrayList<>();
@@ -367,7 +367,7 @@ public class DoSection implements ExecutableSection {
final boolean matches = matcher.matches();
if (matches) {
final String message = HeaderWarning.extractWarningValueFromWarningHeader(header, true);
if (masterVersion.before(LegacyESVersion.V_7_0_0)
if (clusterManagerVersion.before(LegacyESVersion.V_7_0_0)
&& message.equals(
"the default number of shards will change from [5] to [1] in 7.0.0; "
+ "if you wish to continue using the default of [5] shards, "
@@ -375,7 +375,7 @@ public class DoSection implements ExecutableSection {
)) {
/*
* This warning header will come back in the vast majority of our tests that create an index when running against an
* older master. Rather than rewrite our tests to assert this warning header, we assume that it is expected.
* older cluster-manager. Rather than rewrite our tests to assert this warning header, we assume that it is expected.
*/
continue;
}


@@ -63,7 +63,7 @@ import static org.mockito.Mockito.when;
public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
public void testFakeMasterService() {
public void testFakeClusterManagerService() {
List<Runnable> runnableTasks = new ArrayList<>();
AtomicReference<ClusterState> lastClusterStateRef = new AtomicReference<>();
DiscoveryNode discoveryNode = new DiscoveryNode(
@@ -84,21 +84,21 @@ public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
doAnswer(invocationOnMock -> runnableTasks.add((Runnable) invocationOnMock.getArguments()[0])).when(executorService).execute(any());
when(mockThreadPool.generic()).thenReturn(executorService);
FakeThreadPoolMasterService masterService = new FakeThreadPoolMasterService(
FakeThreadPoolMasterService clusterManagerService = new FakeThreadPoolMasterService(
"test_node",
"test",
mockThreadPool,
runnableTasks::add
);
masterService.setClusterStateSupplier(lastClusterStateRef::get);
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
clusterManagerService.setClusterStateSupplier(lastClusterStateRef::get);
clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
lastClusterStateRef.set(event.state());
publishingCallback.set(publishListener);
});
masterService.start();
clusterManagerService.start();
AtomicBoolean firstTaskCompleted = new AtomicBoolean();
masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState)
@@ -124,7 +124,7 @@ public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
assertFalse(firstTaskCompleted.get());
final Runnable scheduleTask = runnableTasks.remove(0);
assertThat(scheduleTask, hasToString("master service scheduling next task"));
assertThat(scheduleTask, hasToString("cluster-manager service scheduling next task"));
scheduleTask.run();
final Runnable publishTask = runnableTasks.remove(0);
@@ -138,7 +138,7 @@ public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
assertThat(runnableTasks.size(), equalTo(0));
AtomicBoolean secondTaskCompleted = new AtomicBoolean();
masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState)


@@ -76,11 +76,11 @@ public class NetworkDisruptionIT extends OpenSearchIntegTestCase {
/**
* Creates a mixed-node cluster of 3 to 5 nodes and splits it into 2 parts.
* The first part is guaranteed to have at least the majority of the nodes,
* so that master could be elected on this side.
* so that cluster-manager could be elected on this side.
*/
private Tuple<Set<String>, Set<String>> prepareDisruptedCluster() {
int numOfNodes = randomIntBetween(3, 5);
internalCluster().setBootstrapMasterNodeIndex(numOfNodes - 1);
internalCluster().setBootstrapClusterManagerNodeIndex(numOfNodes - 1);
Set<String> nodes = new HashSet<>(internalCluster().startNodes(numOfNodes, DISRUPTION_TUNED_SETTINGS));
ensureGreen();
assertThat(nodes.size(), greaterThanOrEqualTo(3));
@@ -127,7 +127,7 @@ public class NetworkDisruptionIT extends OpenSearchIntegTestCase {
}
public void testTransportRespondsEventually() throws InterruptedException {
internalCluster().setBootstrapMasterNodeIndex(0);
internalCluster().setBootstrapClusterManagerNodeIndex(0);
internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(3, 5));
final NetworkDisruption.DisruptedLinks disruptedLinks;
if (randomBoolean()) {


@@ -79,7 +79,7 @@ public class InternalTestClusterIT extends OpenSearchIntegTestCase {
}
public void testOperationsDuringRestart() throws Exception {
internalCluster().startMasterOnlyNode();
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(2);
internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
@Override


@@ -86,7 +86,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
public void testInitializiationIsConsistent() {
long clusterSeed = randomLong();
boolean masterNodes = randomBoolean();
boolean clusterManagerNodes = randomBoolean();
int minNumDataNodes = randomIntBetween(0, 9);
int maxNumDataNodes = randomIntBetween(minNumDataNodes, 10);
String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
@@ -98,7 +98,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster0 = new InternalTestCluster(
clusterSeed,
baseDir,
masterNodes,
clusterManagerNodes,
randomBoolean(),
minNumDataNodes,
maxNumDataNodes,
@@ -112,7 +112,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster1 = new InternalTestCluster(
clusterSeed,
baseDir,
masterNodes,
clusterManagerNodes,
randomBoolean(),
minNumDataNodes,
maxNumDataNodes,
@@ -165,23 +165,23 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
}
public void testBeforeTest() throws Exception {
final boolean autoManageMinMasterNodes = randomBoolean();
final boolean autoManageMinClusterManagerNodes = randomBoolean();
long clusterSeed = randomLong();
final boolean masterNodes;
final boolean clusterManagerNodes;
final int minNumDataNodes;
final int maxNumDataNodes;
final int bootstrapMasterNodeIndex;
if (autoManageMinMasterNodes) {
masterNodes = randomBoolean();
final int bootstrapClusterManagerNodeIndex;
if (autoManageMinClusterManagerNodes) {
clusterManagerNodes = randomBoolean();
minNumDataNodes = randomIntBetween(0, 3);
maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
bootstrapMasterNodeIndex = -1;
bootstrapClusterManagerNodeIndex = -1;
} else {
// if we manage min master nodes, we need to lock down the number of nodes
// if we manage min cluster-manager nodes, we need to lock down the number of nodes
minNumDataNodes = randomIntBetween(0, 4);
maxNumDataNodes = minNumDataNodes;
masterNodes = false;
bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1);
clusterManagerNodes = false;
bootstrapClusterManagerNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1);
}
final int numClientNodes = randomIntBetween(0, 2);
NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
@@ -191,9 +191,9 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
.put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
.putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType());
if (autoManageMinMasterNodes == false) {
if (autoManageMinClusterManagerNodes == false) {
assert minNumDataNodes == maxNumDataNodes;
assert masterNodes == false;
assert clusterManagerNodes == false;
}
return settings.build();
}
@@ -209,8 +209,8 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster0 = new InternalTestCluster(
clusterSeed,
createTempDir(),
masterNodes,
autoManageMinMasterNodes,
clusterManagerNodes,
autoManageMinClusterManagerNodes,
minNumDataNodes,
maxNumDataNodes,
"clustername",
@@ -220,13 +220,13 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
mockPlugins(),
Function.identity()
);
cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
cluster0.setBootstrapClusterManagerNodeIndex(bootstrapClusterManagerNodeIndex);
InternalTestCluster cluster1 = new InternalTestCluster(
clusterSeed,
createTempDir(),
masterNodes,
autoManageMinMasterNodes,
clusterManagerNodes,
autoManageMinClusterManagerNodes,
minNumDataNodes,
maxNumDataNodes,
"clustername",
@@ -236,7 +236,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
mockPlugins(),
Function.identity()
);
cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
cluster1.setBootstrapClusterManagerNodeIndex(bootstrapClusterManagerNodeIndex);
assertClusters(cluster0, cluster1, false);
long seed = randomLong();
@@ -265,7 +265,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException {
long clusterSeed = randomLong();
boolean masterNodes = randomBoolean();
boolean clusterManagerNodes = randomBoolean();
// we need one stable node
final int minNumDataNodes = 2;
final int maxNumDataNodes = 2;
@@ -291,7 +291,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster = new InternalTestCluster(
clusterSeed,
baseDir,
masterNodes,
clusterManagerNodes,
true,
minNumDataNodes,
maxNumDataNodes,
@@ -304,13 +304,13 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
);
try {
cluster.beforeTest(random());
final int originalMasterCount = cluster.numMasterNodes();
final int originalClusterManagerCount = cluster.numMasterNodes();
final Map<String, Path[]> shardNodePaths = new HashMap<>();
for (String name : cluster.getNodeNames()) {
shardNodePaths.put(name, getNodePaths(cluster, name));
}
String poorNode = randomValueOtherThanMany(
n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()),
n -> originalClusterManagerCount == 1 && n.equals(cluster.getMasterName()),
() -> randomFrom(cluster.getNodeNames())
);
Path dataPath = getNodePaths(cluster, poorNode)[0];
@@ -409,7 +409,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
roles.add(role);
}
cluster.setBootstrapMasterNodeIndex(
cluster.setBootstrapClusterManagerNodeIndex(
randomIntBetween(0, (int) roles.stream().filter(role -> role.equals(clusterManagerRole)).count() - 1)
);
@@ -419,7 +419,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
final DiscoveryNodeRole role = roles.get(i);
final String node;
if (role == clusterManagerRole) {
node = cluster.startMasterOnlyNode();
node = cluster.startClusterManagerOnlyNode();
} else if (role == DiscoveryNodeRole.DATA_ROLE) {
node = cluster.startDataOnlyNode();
} else if (role == DiscoveryNodeRole.INGEST_ROLE) {