Replace internal usages of 'master' term in 'test' directory (#3283)

* Replace internal usages of 'master' terminology in server/src/internalClusterTest directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Replace 'master' terminology with 'cluster manager' in test directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Replace 'master' terminology with 'cluster manager' in server/src/internalClusterTest directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Adjust format by spotlessApply task

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Replace 'master' terminology with 'cluster manager' in test directory

Signed-off-by: Tianli Feng <ftianli@amazon.com>

* Adjust format by spotlessApply task

Signed-off-by: Tianli Feng <ftianli@amazon.com>
This commit is contained in:
Tianli Feng 2022-05-11 16:34:57 -07:00 committed by GitHub
parent 677915dc00
commit 10bff0c9f5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
86 changed files with 743 additions and 692 deletions

View File

@@ -86,7 +86,7 @@ public class IngestRestartIT extends OpenSearchIntegTestCase {
public void testFailureInConditionalProcessor() { public void testFailureInConditionalProcessor() {
internalCluster().ensureAtLeastNumDataNodes(1); internalCluster().ensureAtLeastNumDataNodes(1);
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String pipelineId = "foo"; final String pipelineId = "foo";
client().admin() client().admin()
.cluster() .cluster()

View File

@@ -69,7 +69,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
} }
public void testRollingRestartOfTwoNodeCluster() throws Exception { public void testRollingRestartOfTwoNodeCluster() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(1); internalCluster().setBootstrapClusterManagerNodeIndex(1);
final List<String> nodes = internalCluster().startNodes(2); final List<String> nodes = internalCluster().startNodes(2);
createIndex( createIndex(
"test", "test",
@@ -135,7 +135,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
} }
public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception { public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3); List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3); ensureStableCluster(3);
RestClient restClient = getRestClient(); RestClient restClient = getRestClient();
@@ -150,7 +150,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
} }
public void testClearVotingTombstonesWaitingForRemoval() throws Exception { public void testClearVotingTombstonesWaitingForRemoval() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3); List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3); ensureStableCluster(3);
RestClient restClient = getRestClient(); RestClient restClient = getRestClient();
@@ -165,7 +165,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
} }
public void testFailsOnUnknownNode() throws Exception { public void testFailsOnUnknownNode() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
internalCluster().startNodes(3); internalCluster().startNodes(3);
ensureStableCluster(3); ensureStableCluster(3);
RestClient restClient = getRestClient(); RestClient restClient = getRestClient();
@@ -182,7 +182,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
} }
public void testRemoveTwoNodesAtOnce() throws Exception { public void testRemoveTwoNodesAtOnce() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> nodes = internalCluster().startNodes(3); List<String> nodes = internalCluster().startNodes(3);
ensureStableCluster(3); ensureStableCluster(3);
RestClient restClient = getRestClient(); RestClient restClient = getRestClient();

View File

@@ -86,7 +86,7 @@ public class GceDiscoverTests extends OpenSearchIntegTestCase {
public void testJoin() { public void testJoin() {
// start master node // start master node
final String masterNode = internalCluster().startMasterOnlyNode(); final String masterNode = internalCluster().startClusterManagerOnlyNode();
registerGceNode(masterNode); registerGceNode(masterNode);
ClusterStateResponse clusterStateResponse = client(masterNode).admin() ClusterStateResponse clusterStateResponse = client(masterNode).admin()

View File

@@ -91,7 +91,7 @@ public class DanglingIndicesRestIT extends HttpSmokeTestCase {
* Check that when dangling indices are discovered, then they can be listed via the REST API. * Check that when dangling indices are discovered, then they can be listed via the REST API.
*/ */
public void testDanglingIndicesCanBeListed() throws Exception { public void testDanglingIndicesCanBeListed() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(1); internalCluster().setBootstrapClusterManagerNodeIndex(1);
internalCluster().startNodes(3, buildSettings(0)); internalCluster().startNodes(3, buildSettings(0));
final DanglingIndexDetails danglingIndexDetails = createDanglingIndices(INDEX_NAME); final DanglingIndexDetails danglingIndexDetails = createDanglingIndices(INDEX_NAME);
@@ -121,7 +121,7 @@ public class DanglingIndicesRestIT extends HttpSmokeTestCase {
* Check that dangling indices can be imported. * Check that dangling indices can be imported.
*/ */
public void testDanglingIndicesCanBeImported() throws Exception { public void testDanglingIndicesCanBeImported() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(1); internalCluster().setBootstrapClusterManagerNodeIndex(1);
internalCluster().startNodes(3, buildSettings(0)); internalCluster().startNodes(3, buildSettings(0));
createDanglingIndices(INDEX_NAME); createDanglingIndices(INDEX_NAME);
@@ -157,7 +157,7 @@ public class DanglingIndicesRestIT extends HttpSmokeTestCase {
* deleted through the API * deleted through the API
*/ */
public void testDanglingIndicesCanBeDeleted() throws Exception { public void testDanglingIndicesCanBeDeleted() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(1); internalCluster().setBootstrapClusterManagerNodeIndex(1);
internalCluster().startNodes(3, buildSettings(1)); internalCluster().startNodes(3, buildSettings(1));
createDanglingIndices(INDEX_NAME, OTHER_INDEX_NAME); createDanglingIndices(INDEX_NAME, OTHER_INDEX_NAME);

View File

@@ -47,7 +47,7 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
} }
public void testNodesInfoTimeout() { public void testNodesInfoTimeout() {
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode(); String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode(); String anotherDataNode = internalCluster().startDataOnlyNode();
@@ -65,11 +65,11 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
nodes.add(node.getNode().getName()); nodes.add(node.getNode().getName());
} }
assertThat(response.getNodes().size(), equalTo(2)); assertThat(response.getNodes().size(), equalTo(2));
assertThat(nodes.contains(masterNode), is(true)); assertThat(nodes.contains(clusterManagerNode), is(true));
} }
public void testNodesStatsTimeout() { public void testNodesStatsTimeout() {
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode(); String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode(); String anotherDataNode = internalCluster().startDataOnlyNode();
TimeValue timeout = TimeValue.timeValueMillis(1000); TimeValue timeout = TimeValue.timeValueMillis(1000);
@@ -87,11 +87,11 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
nodes.add(node.getNode().getName()); nodes.add(node.getNode().getName());
} }
assertThat(response.getNodes().size(), equalTo(2)); assertThat(response.getNodes().size(), equalTo(2));
assertThat(nodes.contains(masterNode), is(true)); assertThat(nodes.contains(clusterManagerNode), is(true));
} }
public void testListTasksTimeout() { public void testListTasksTimeout() {
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode(); String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode(); String anotherDataNode = internalCluster().startDataOnlyNode();
TimeValue timeout = TimeValue.timeValueMillis(1000); TimeValue timeout = TimeValue.timeValueMillis(1000);
@@ -108,7 +108,7 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
} }
public void testRecoveriesWithTimeout() { public void testRecoveriesWithTimeout() {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode(); String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode(); String anotherDataNode = internalCluster().startDataOnlyNode();
@@ -148,7 +148,7 @@ public class ClientTimeoutIT extends OpenSearchIntegTestCase {
} }
public void testStatsWithTimeout() { public void testStatsWithTimeout() {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode(); String dataNode = internalCluster().startDataOnlyNode();
String anotherDataNode = internalCluster().startDataOnlyNode(); String anotherDataNode = internalCluster().startDataOnlyNode();

View File

@@ -1110,7 +1110,7 @@ public final class ClusterAllocationExplainIT extends OpenSearchIntegTestCase {
public void testCannotAllocateStaleReplicaExplanation() throws Exception { public void testCannotAllocateStaleReplicaExplanation() throws Exception {
logger.info("--> starting 3 nodes"); logger.info("--> starting 3 nodes");
final String masterNode = internalCluster().startNode(); final String clusterManagerNode = internalCluster().startNode();
// start replica node first, so it's path will be used first when we start a node after // start replica node first, so it's path will be used first when we start a node after
// stopping all of them at end of test. // stopping all of them at end of test.
final String replicaNode = internalCluster().startNode(); final String replicaNode = internalCluster().startNode();
@@ -1123,7 +1123,7 @@ public final class ClusterAllocationExplainIT extends OpenSearchIntegTestCase {
1, 1,
Settings.builder() Settings.builder()
.put("index.routing.allocation.include._name", primaryNode) .put("index.routing.allocation.include._name", primaryNode)
.put("index.routing.allocation.exclude._name", masterNode) .put("index.routing.allocation.exclude._name", clusterManagerNode)
.build(), .build(),
ActiveShardCount.ONE ActiveShardCount.ONE
); );
@@ -1165,7 +1165,7 @@ public final class ClusterAllocationExplainIT extends OpenSearchIntegTestCase {
logger.info("--> restart the node with the stale replica"); logger.info("--> restart the node with the stale replica");
String restartedNode = internalCluster().startDataOnlyNode(replicaDataPathSettings); String restartedNode = internalCluster().startDataOnlyNode(replicaDataPathSettings);
ensureClusterSizeConsistency(); // wait for the master to finish processing join. ensureClusterSizeConsistency(); // wait for the cluster-manager to finish processing join.
// wait until the system has fetched shard data and we know there is no valid shard copy // wait until the system has fetched shard data and we know there is no valid shard copy
assertBusy(() -> { assertBusy(() -> {

View File

@@ -117,7 +117,7 @@ import static org.hamcrest.Matchers.startsWith;
/** /**
* Integration tests for task management API * Integration tests for task management API
* <p> * <p>
* We need at least 2 nodes so we have a master node a non-master node * We need at least 2 nodes so we have a cluster-manager node a non-cluster-manager node
*/ */
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2) @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2)
public class TasksIT extends OpenSearchIntegTestCase { public class TasksIT extends OpenSearchIntegTestCase {
@@ -154,17 +154,17 @@ public class TasksIT extends OpenSearchIntegTestCase {
assertThat(response.getTasks().size(), greaterThanOrEqualTo(cluster().numDataNodes())); assertThat(response.getTasks().size(), greaterThanOrEqualTo(cluster().numDataNodes()));
} }
public void testMasterNodeOperationTasks() { public void testClusterManagerNodeOperationTasks() {
registerTaskManagerListeners(ClusterHealthAction.NAME); registerTaskManagerListeners(ClusterHealthAction.NAME);
// First run the health on the master node - should produce only one task on the master node // First run the health on the cluster-manager node - should produce only one task on the cluster-manager node
internalCluster().masterClient().admin().cluster().prepareHealth().get(); internalCluster().masterClient().admin().cluster().prepareHealth().get();
assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events
assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events
resetTaskManagerListeners(ClusterHealthAction.NAME); resetTaskManagerListeners(ClusterHealthAction.NAME);
// Now run the health on a non-master node - should produce one task on master and one task on another node // Now run the health on a non-cluster-manager node - should produce one task on cluster-manager and one task on another node
internalCluster().nonMasterClient().admin().cluster().prepareHealth().get(); internalCluster().nonMasterClient().admin().cluster().prepareHealth().get();
assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events
assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events

View File

@@ -68,8 +68,8 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
return Collections.singletonList(MockTransportService.TestPlugin.class); return Collections.singletonList(MockTransportService.TestPlugin.class);
} }
public void testNonLocalRequestAlwaysFindsMaster() throws Exception { public void testNonLocalRequestAlwaysFindsClusterManager() throws Exception {
runRepeatedlyWhileChangingMaster(() -> { runRepeatedlyWhileChangingClusterManager(() -> {
final ClusterStateRequestBuilder clusterStateRequestBuilder = client().admin() final ClusterStateRequestBuilder clusterStateRequestBuilder = client().admin()
.cluster() .cluster()
.prepareState() .prepareState()
@@ -82,12 +82,12 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
} catch (MasterNotDiscoveredException e) { } catch (MasterNotDiscoveredException e) {
return; // ok, we hit the disconnected node return; // ok, we hit the disconnected node
} }
assertNotNull("should always contain a master node", clusterStateResponse.getState().nodes().getMasterNodeId()); assertNotNull("should always contain a cluster-manager node", clusterStateResponse.getState().nodes().getMasterNodeId());
}); });
} }
public void testLocalRequestAlwaysSucceeds() throws Exception { public void testLocalRequestAlwaysSucceeds() throws Exception {
runRepeatedlyWhileChangingMaster(() -> { runRepeatedlyWhileChangingClusterManager(() -> {
final String node = randomFrom(internalCluster().getNodeNames()); final String node = randomFrom(internalCluster().getNodeNames());
final DiscoveryNodes discoveryNodes = client(node).admin() final DiscoveryNodes discoveryNodes = client(node).admin()
.cluster() .cluster()
@@ -108,8 +108,8 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
}); });
} }
public void testNonLocalRequestAlwaysFindsMasterAndWaitsForMetadata() throws Exception { public void testNonLocalRequestAlwaysFindsClusterManagerAndWaitsForMetadata() throws Exception {
runRepeatedlyWhileChangingMaster(() -> { runRepeatedlyWhileChangingClusterManager(() -> {
final String node = randomFrom(internalCluster().getNodeNames()); final String node = randomFrom(internalCluster().getNodeNames());
final long metadataVersion = internalCluster().getInstance(ClusterService.class, node) final long metadataVersion = internalCluster().getInstance(ClusterService.class, node)
.getClusterApplierService() .getClusterApplierService()
@@ -134,14 +134,14 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
} }
if (clusterStateResponse.isWaitForTimedOut() == false) { if (clusterStateResponse.isWaitForTimedOut() == false) {
final ClusterState state = clusterStateResponse.getState(); final ClusterState state = clusterStateResponse.getState();
assertNotNull("should always contain a master node", state.nodes().getMasterNodeId()); assertNotNull("should always contain a cluster-manager node", state.nodes().getMasterNodeId());
assertThat("waited for metadata version", state.metadata().version(), greaterThanOrEqualTo(waitForMetadataVersion)); assertThat("waited for metadata version", state.metadata().version(), greaterThanOrEqualTo(waitForMetadataVersion));
} }
}); });
} }
public void testLocalRequestWaitsForMetadata() throws Exception { public void testLocalRequestWaitsForMetadata() throws Exception {
runRepeatedlyWhileChangingMaster(() -> { runRepeatedlyWhileChangingClusterManager(() -> {
final String node = randomFrom(internalCluster().getNodeNames()); final String node = randomFrom(internalCluster().getNodeNames());
final long metadataVersion = internalCluster().getInstance(ClusterService.class, node) final long metadataVersion = internalCluster().getInstance(ClusterService.class, node)
.getClusterApplierService() .getClusterApplierService()
@@ -170,7 +170,7 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
}); });
} }
public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception { public void runRepeatedlyWhileChangingClusterManager(Runnable runnable) throws Exception {
internalCluster().startNodes(3); internalCluster().startNodes(3);
assertBusy( assertBusy(
@@ -191,7 +191,7 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
) )
); );
final String masterName = internalCluster().getMasterName(); final String clusterManagerName = internalCluster().getMasterName();
final AtomicBoolean shutdown = new AtomicBoolean(); final AtomicBoolean shutdown = new AtomicBoolean();
final Thread assertingThread = new Thread(() -> { final Thread assertingThread = new Thread(() -> {
@@ -204,9 +204,12 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
String value = "none"; String value = "none";
while (shutdown.get() == false) { while (shutdown.get() == false) {
value = "none".equals(value) ? "all" : "none"; value = "none".equals(value) ? "all" : "none";
final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames())); final String nonClusterManagerNode = randomValueOtherThan(
clusterManagerName,
() -> randomFrom(internalCluster().getNodeNames())
);
assertAcked( assertAcked(
client(nonMasterNode).admin() client(nonClusterManagerNode).admin()
.cluster() .cluster()
.prepareUpdateSettings() .prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), value)) .setPersistentSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), value))
@@ -222,22 +225,25 @@ public class TransportClusterStateActionDisruptionIT extends OpenSearchIntegTest
assertingThread.start(); assertingThread.start();
updatingThread.start(); updatingThread.start();
final MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance( final MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class, TransportService.class,
masterName clusterManagerName
); );
for (MockTransportService mockTransportService : mockTransportServices) { for (MockTransportService mockTransportService : mockTransportServices) {
if (masterTransportService != mockTransportService) { if (clusterManagerTransportService != mockTransportService) {
masterTransportService.addFailToSendNoConnectRule(mockTransportService); clusterManagerTransportService.addFailToSendNoConnectRule(mockTransportService);
mockTransportService.addFailToSendNoConnectRule(masterTransportService); mockTransportService.addFailToSendNoConnectRule(clusterManagerTransportService);
} }
} }
assertBusy(() -> { assertBusy(() -> {
final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames())); final String nonClusterManagerNode = randomValueOtherThan(
final String claimedMasterName = internalCluster().getMasterName(nonMasterNode); clusterManagerName,
assertThat(claimedMasterName, not(equalTo(masterName))); () -> randomFrom(internalCluster().getNodeNames())
);
final String claimedClusterManagerName = internalCluster().getMasterName(nonClusterManagerNode);
assertThat(claimedClusterManagerName, not(equalTo(clusterManagerName)));
}); });
shutdown.set(true); shutdown.set(true);

View File

@@ -97,7 +97,7 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
for (int i = 0; i < numNodes; i++) { for (int i = 0; i < numNodes; i++) {
boolean isDataNode = randomBoolean(); boolean isDataNode = randomBoolean();
boolean isIngestNode = randomBoolean(); boolean isIngestNode = randomBoolean();
boolean isMasterNode = randomBoolean(); boolean isClusterManagerNode = randomBoolean();
boolean isRemoteClusterClientNode = false; boolean isRemoteClusterClientNode = false;
final Set<DiscoveryNodeRole> roles = new HashSet<>(); final Set<DiscoveryNodeRole> roles = new HashSet<>();
if (isDataNode) { if (isDataNode) {
@@ -106,7 +106,7 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
if (isIngestNode) { if (isIngestNode) {
roles.add(DiscoveryNodeRole.INGEST_ROLE); roles.add(DiscoveryNodeRole.INGEST_ROLE);
} }
if (isMasterNode) { if (isClusterManagerNode) {
roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
} }
if (isRemoteClusterClientNode) { if (isRemoteClusterClientNode) {
@@ -128,14 +128,14 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
if (isIngestNode) { if (isIngestNode) {
incrementCountForRole(DiscoveryNodeRole.INGEST_ROLE.roleName(), expectedCounts); incrementCountForRole(DiscoveryNodeRole.INGEST_ROLE.roleName(), expectedCounts);
} }
if (isMasterNode) { if (isClusterManagerNode) {
incrementCountForRole(DiscoveryNodeRole.MASTER_ROLE.roleName(), expectedCounts); incrementCountForRole(DiscoveryNodeRole.MASTER_ROLE.roleName(), expectedCounts);
incrementCountForRole(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), expectedCounts); incrementCountForRole(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), expectedCounts);
} }
if (isRemoteClusterClientNode) { if (isRemoteClusterClientNode) {
incrementCountForRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), expectedCounts); incrementCountForRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), expectedCounts);
} }
if (!isDataNode && !isMasterNode && !isIngestNode && !isRemoteClusterClientNode) { if (!isDataNode && !isClusterManagerNode && !isIngestNode && !isRemoteClusterClientNode) {
incrementCountForRole(ClusterStatsNodes.Counts.COORDINATING_ONLY, expectedCounts); incrementCountForRole(ClusterStatsNodes.Counts.COORDINATING_ONLY, expectedCounts);
} }
@@ -254,12 +254,12 @@ public class ClusterStatsIT extends OpenSearchIntegTestCase {
} }
public void testClusterStatusWhenStateNotRecovered() throws Exception { public void testClusterStatusWhenStateNotRecovered() throws Exception {
internalCluster().startMasterOnlyNode(Settings.builder().put("gateway.recover_after_nodes", 2).build()); internalCluster().startClusterManagerOnlyNode(Settings.builder().put("gateway.recover_after_nodes", 2).build());
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED));
if (randomBoolean()) { if (randomBoolean()) {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
} else { } else {
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
} }

View File

@@ -48,7 +48,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBui
public class IndicesExistsIT extends OpenSearchIntegTestCase { public class IndicesExistsIT extends OpenSearchIntegTestCase {
public void testIndexExistsWithBlocksInPlace() throws IOException { public void testIndexExistsWithBlocksInPlace() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
Settings settings = Settings.builder().put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99).build(); Settings settings = Settings.builder().put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99).build();
String node = internalCluster().startNode(settings); String node = internalCluster().startNode(settings);

View File

@@ -65,7 +65,7 @@ public class IndexingMasterFailoverIT extends OpenSearchIntegTestCase {
public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable { public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable {
logger.info("--> start 4 nodes, 3 master, 1 data"); logger.info("--> start 4 nodes, 3 master, 1 data");
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
internalCluster().startMasterOnlyNodes(3, Settings.EMPTY); internalCluster().startMasterOnlyNodes(3, Settings.EMPTY);

View File

@@ -93,9 +93,13 @@ import static org.hamcrest.Matchers.is;
public class ClusterStateDiffIT extends OpenSearchIntegTestCase { public class ClusterStateDiffIT extends OpenSearchIntegTestCase {
public void testClusterStateDiffSerialization() throws Exception { public void testClusterStateDiffSerialization() throws Exception {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
DiscoveryNode masterNode = randomNode("master"); DiscoveryNode clusterManagerNode = randomNode("master");
DiscoveryNode otherNode = randomNode("other"); DiscoveryNode otherNode = randomNode("other");
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build(); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
.add(clusterManagerNode)
.add(otherNode)
.localNodeId(clusterManagerNode.getId())
.build();
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes( ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(
ClusterState.Builder.toBytes(clusterState), ClusterState.Builder.toBytes(clusterState),

View File

@@ -84,7 +84,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
} }
public void testTwoNodesNoMasterBlock() throws Exception { public void testTwoNodesNoMasterBlock() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(1); internalCluster().setBootstrapClusterManagerNodeIndex(1);
Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build(); Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -250,7 +250,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
} }
public void testThreeNodesNoMasterBlock() throws Exception { public void testThreeNodesNoMasterBlock() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build(); Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -309,11 +309,11 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100); assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
} }
List<String> nonMasterNodes = new ArrayList<>( List<String> nonClusterManagerNodes = new ArrayList<>(
Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), Collections.singleton(internalCluster().getMasterName())) Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), Collections.singleton(internalCluster().getMasterName()))
); );
Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonMasterNodes.get(0)); Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0));
Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonMasterNodes.get(1)); Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1));
internalCluster().stopRandomNonMasterNode(); internalCluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode(); internalCluster().stopRandomNonMasterNode();
@@ -340,7 +340,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
} }
public void testCannotCommitStateThreeNodes() throws Exception { public void testCannotCommitStateThreeNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build(); Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -393,7 +393,7 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
// otherwise persistent setting (which is a part of accepted state on old master) will be propagated to other nodes // otherwise persistent setting (which is a part of accepted state on old master) will be propagated to other nodes
logger.debug("--> wait for master to be elected in major partition"); logger.debug("--> wait for master to be elected in major partition");
assertBusy(() -> { assertBusy(() -> {
DiscoveryNode masterNode = internalCluster().client(randomFrom(otherNodes)) DiscoveryNode clusterManagerNode = internalCluster().client(randomFrom(otherNodes))
.admin() .admin()
.cluster() .cluster()
.prepareState() .prepareState()
@ -402,8 +402,8 @@ public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
.getState() .getState()
.nodes() .nodes()
.getMasterNode(); .getMasterNode();
assertThat(masterNode, notNullValue()); assertThat(clusterManagerNode, notNullValue());
assertThat(masterNode.getName(), not(equalTo(master))); assertThat(clusterManagerNode.getName(), not(equalTo(master)));
}); });
partition.stopDisrupting(); partition.stopDisrupting();

View File

@ -56,7 +56,7 @@ import static org.hamcrest.Matchers.nullValue;
public class SpecificMasterNodesIT extends OpenSearchIntegTestCase { public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
public void testSimpleOnlyMasterNodeElection() throws IOException { public void testSimpleOnlyMasterNodeElection() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start data node / non master node"); logger.info("--> start data node / non master node");
internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s"));
try { try {
@ -77,7 +77,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
// all is well, no cluster-manager elected // all is well, no cluster-manager elected
} }
logger.info("--> start master node"); logger.info("--> start master node");
final String masterNodeName = internalCluster().startMasterOnlyNode(); final String masterNodeName = internalCluster().startClusterManagerOnlyNode();
assertThat( assertThat(
internalCluster().nonMasterClient() internalCluster().nonMasterClient()
.admin() .admin()
@ -160,7 +160,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
} }
public void testElectOnlyBetweenMasterNodes() throws Exception { public void testElectOnlyBetweenMasterNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start data node / non master node"); logger.info("--> start data node / non master node");
internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s"));
try { try {
@ -181,7 +181,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
// all is well, no cluster-manager elected // all is well, no cluster-manager elected
} }
logger.info("--> start master node (1)"); logger.info("--> start master node (1)");
final String masterNodeName = internalCluster().startMasterOnlyNode(); final String masterNodeName = internalCluster().startClusterManagerOnlyNode();
assertThat( assertThat(
internalCluster().nonMasterClient() internalCluster().nonMasterClient()
.admin() .admin()
@ -210,7 +210,7 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
); );
logger.info("--> start master node (2)"); logger.info("--> start master node (2)");
final String nextMasterEligableNodeName = internalCluster().startMasterOnlyNode(); final String nextMasterEligableNodeName = internalCluster().startClusterManagerOnlyNode();
assertThat( assertThat(
internalCluster().nonMasterClient() internalCluster().nonMasterClient()
.admin() .admin()
@ -312,9 +312,9 @@ public class SpecificMasterNodesIT extends OpenSearchIntegTestCase {
} }
public void testAliasFilterValidation() { public void testAliasFilterValidation() {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start master node / non data"); logger.info("--> start master node / non data");
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
logger.info("--> start data node / non master node"); logger.info("--> start data node / non master node");
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();

View File

@ -173,7 +173,7 @@ public class RareClusterStateIT extends OpenSearchIntegTestCase {
} }
public void testDeleteCreateInOneBulk() throws Exception { public void testDeleteCreateInOneBulk() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode(); String dataNode = internalCluster().startDataOnlyNode();
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)).get(); prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)).get();

View File

@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.containsString;
public class RemoveCustomsCommandIT extends OpenSearchIntegTestCase { public class RemoveCustomsCommandIT extends OpenSearchIntegTestCase {
public void testRemoveCustomsAbortedByUser() throws Exception { public void testRemoveCustomsAbortedByUser() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node); Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1); ensureStableCluster(1);
@ -59,7 +59,7 @@ public class RemoveCustomsCommandIT extends OpenSearchIntegTestCase {
} }
public void testRemoveCustomsSuccessful() throws Exception { public void testRemoveCustomsSuccessful() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
createIndex("test"); createIndex("test");
client().admin().indices().prepareDelete("test").get(); client().admin().indices().prepareDelete("test").get();
@ -85,7 +85,7 @@ public class RemoveCustomsCommandIT extends OpenSearchIntegTestCase {
} }
public void testCustomDoesNotMatch() throws Exception { public void testCustomDoesNotMatch() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
createIndex("test"); createIndex("test");
client().admin().indices().prepareDelete("test").get(); client().admin().indices().prepareDelete("test").get();

View File

@ -49,7 +49,7 @@ import static org.hamcrest.Matchers.not;
public class RemoveSettingsCommandIT extends OpenSearchIntegTestCase { public class RemoveSettingsCommandIT extends OpenSearchIntegTestCase {
public void testRemoveSettingsAbortedByUser() throws Exception { public void testRemoveSettingsAbortedByUser() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
client().admin() client().admin()
.cluster() .cluster()
@ -78,7 +78,7 @@ public class RemoveSettingsCommandIT extends OpenSearchIntegTestCase {
} }
public void testRemoveSettingsSuccessful() throws Exception { public void testRemoveSettingsSuccessful() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
client().admin() client().admin()
.cluster() .cluster()
@ -122,7 +122,7 @@ public class RemoveSettingsCommandIT extends OpenSearchIntegTestCase {
} }
public void testSettingDoesNotMatch() throws Exception { public void testSettingDoesNotMatch() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
client().admin() client().admin()
.cluster() .cluster()

View File

@ -228,7 +228,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void testBootstrapNoClusterState() throws IOException { public void testBootstrapNoClusterState() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node); Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1); ensureStableCluster(1);
@ -243,7 +243,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void testDetachNoClusterState() throws IOException { public void testDetachNoClusterState() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node); Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1); ensureStableCluster(1);
@ -258,7 +258,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void testBootstrapAbortedByUser() throws IOException { public void testBootstrapAbortedByUser() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node); Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1); ensureStableCluster(1);
@ -271,7 +271,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void testDetachAbortedByUser() throws IOException { public void testDetachAbortedByUser() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String node = internalCluster().startNode(); String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node); Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1); ensureStableCluster(1);
@ -284,12 +284,12 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void test3MasterNodes2Failed() throws Exception { public void test3MasterNodes2Failed() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().setBootstrapClusterManagerNodeIndex(2);
List<String> masterNodes = new ArrayList<>(); List<String> masterNodes = new ArrayList<>();
logger.info("--> start 1st cluster-manager-eligible node"); logger.info("--> start 1st cluster-manager-eligible node");
masterNodes.add( masterNodes.add(
internalCluster().startMasterOnlyNode( internalCluster().startClusterManagerOnlyNode(
Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()
) )
); // node ordinal 0 ); // node ordinal 0
@ -364,7 +364,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
); );
logger.info("--> start 1st cluster-manager-eligible node"); logger.info("--> start 1st cluster-manager-eligible node");
String masterNode2 = internalCluster().startMasterOnlyNode(master1DataPathSettings); String masterNode2 = internalCluster().startClusterManagerOnlyNode(master1DataPathSettings);
logger.info("--> detach-cluster on data-only node"); logger.info("--> detach-cluster on data-only node");
Environment environmentData = TestEnvironment.newEnvironment( Environment environmentData = TestEnvironment.newEnvironment(
@ -410,15 +410,15 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
detachCluster(environmentMaster3, false); detachCluster(environmentMaster3, false);
logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and ensure 4 nodes stable cluster"); logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and ensure 4 nodes stable cluster");
bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master2DataPathSettings)); bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(master2DataPathSettings));
bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master3DataPathSettings)); bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(master3DataPathSettings));
ensureStableCluster(4); ensureStableCluster(4);
bootstrappedNodes.forEach(node -> ensureReadOnlyBlock(true, node)); bootstrappedNodes.forEach(node -> ensureReadOnlyBlock(true, node));
removeBlock(); removeBlock();
} }
public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Exception { public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
Settings settings = Settings.builder().put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), true).build(); Settings settings = Settings.builder().put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), true).build();
@ -478,21 +478,21 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void testNoInitialBootstrapAfterDetach() throws Exception { public void testNoInitialBootstrapAfterDetach() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
internalCluster().stopCurrentMasterNode(); internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment( final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build() Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build()
); );
detachCluster(environment); detachCluster(environment);
String node = internalCluster().startMasterOnlyNode( String node = internalCluster().startClusterManagerOnlyNode(
Settings.builder() Settings.builder()
// give the cluster 2 seconds to elect the master (it should not) // give the cluster 2 seconds to elect the master (it should not)
.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s") .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s")
.put(masterNodeDataPathSettings) .put(clusterManagerNodeDataPathSettings)
.build() .build()
); );
@ -503,9 +503,9 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata() throws Exception { public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings(
Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb") Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")
); );
@ -514,17 +514,17 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
ensureReadOnlyBlock(false, masterNode); ensureReadOnlyBlock(false, clusterManagerNode);
internalCluster().stopCurrentMasterNode(); internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment( final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build() Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build()
); );
detachCluster(environment); detachCluster(environment);
unsafeBootstrap(environment); // read-only block will remain same as one before bootstrap, in this case it is false unsafeBootstrap(environment); // read-only block will remain same as one before bootstrap, in this case it is false
String masterNode2 = internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); String masterNode2 = internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings);
ensureGreen(); ensureGreen();
ensureReadOnlyBlock(false, masterNode2); ensureReadOnlyBlock(false, masterNode2);
@ -533,9 +533,9 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
} }
public void testUnsafeBootstrapWithApplyClusterReadOnlyBlockAsFalse() throws Exception { public void testUnsafeBootstrapWithApplyClusterReadOnlyBlockAsFalse() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings(
Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb") Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")
); );
@ -544,18 +544,18 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
ensureReadOnlyBlock(false, masterNode); ensureReadOnlyBlock(false, clusterManagerNode);
internalCluster().stopCurrentMasterNode(); internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment( final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build() Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build()
); );
unsafeBootstrap(environment, false, false); unsafeBootstrap(environment, false, false);
String masterNode2 = internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); String clusterManagerNode2 = internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings);
ensureGreen(); ensureGreen();
ensureReadOnlyBlock(false, masterNode2); ensureReadOnlyBlock(false, clusterManagerNode2);
state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));

View File

@ -62,7 +62,7 @@ public class VotingConfigurationIT extends OpenSearchIntegTestCase {
} }
public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionException, InterruptedException { public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionException, InterruptedException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
internalCluster().startNodes(2); internalCluster().startNodes(2);
final String originalMaster = internalCluster().getMasterName(); final String originalMaster = internalCluster().getMasterName();
@ -73,7 +73,7 @@ public class VotingConfigurationIT extends OpenSearchIntegTestCase {
} }
public void testElectsNodeNotInVotingConfiguration() throws Exception { public void testElectsNodeNotInVotingConfiguration() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
final List<String> nodeNames = internalCluster().startNodes(4); final List<String> nodeNames = internalCluster().startNodes(4);
// a 4-node cluster settles on a 3-node configuration; we then prevent the nodes in the configuration from winning an election // a 4-node cluster settles on a 3-node configuration; we then prevent the nodes in the configuration from winning an election

View File

@ -60,7 +60,7 @@ import java.util.concurrent.TimeoutException;
import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.DISCOVERY; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.DISCOVERY;
import static org.opensearch.test.NodeRoles.dataNode; import static org.opensearch.test.NodeRoles.dataNode;
import static org.opensearch.test.NodeRoles.masterOnlyNode; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@ -73,7 +73,7 @@ public class ZenDiscoveryIT extends OpenSearchIntegTestCase {
public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception { public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception {
Settings masterNodeSettings = masterOnlyNode(); Settings masterNodeSettings = clusterManagerOnlyNode();
internalCluster().startNodes(2, masterNodeSettings); internalCluster().startNodes(2, masterNodeSettings);
Settings dateNodeSettings = dataNode(); Settings dateNodeSettings = dataNode();
internalCluster().startNodes(2, dateNodeSettings); internalCluster().startNodes(2, dateNodeSettings);
@ -106,10 +106,10 @@ public class ZenDiscoveryIT extends OpenSearchIntegTestCase {
} }
public void testHandleNodeJoin_incompatibleClusterState() throws InterruptedException, ExecutionException, TimeoutException { public void testHandleNodeJoin_incompatibleClusterState() throws InterruptedException, ExecutionException, TimeoutException {
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String node1 = internalCluster().startNode(); String node1 = internalCluster().startNode();
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1);
Coordinator coordinator = (Coordinator) internalCluster().getInstance(Discovery.class, masterNode); Coordinator coordinator = (Coordinator) internalCluster().getInstance(Discovery.class, clusterManagerNode);
final ClusterState state = clusterService.state(); final ClusterState state = clusterService.state();
Metadata.Builder mdBuilder = Metadata.builder(state.metadata()); Metadata.Builder mdBuilder = Metadata.builder(state.metadata());
mdBuilder.putCustom(CustomMetadata.TYPE, new CustomMetadata("data")); mdBuilder.putCustom(CustomMetadata.TYPE, new CustomMetadata("data"));

View File

@ -100,7 +100,7 @@ public class AllocationIdIT extends OpenSearchIntegTestCase {
// initial set up // initial set up
final String indexName = "index42"; final String indexName = "index42";
final String master = internalCluster().startMasterOnlyNode(); final String clusterManager = internalCluster().startClusterManagerOnlyNode();
String node1 = internalCluster().startNode(); String node1 = internalCluster().startNode();
createIndex( createIndex(
indexName, indexName,

View File

@ -110,7 +110,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
} }
public void testBulkWeirdScenario() throws Exception { public void testBulkWeirdScenario() throws Exception {
String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2); internalCluster().startDataOnlyNodes(2);
assertAcked( assertAcked(
@ -203,7 +203,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception {
logger.info("--> starting 3 nodes, 1 master, 2 data"); logger.info("--> starting 3 nodes, 1 master, 2 data");
String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2); internalCluster().startDataOnlyNodes(2);
assertAcked( assertAcked(
client().admin() client().admin()
@ -292,7 +292,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { public void testForceStaleReplicaToBePromotedToPrimary() throws Exception {
logger.info("--> starting 3 nodes, 1 master, 2 data"); logger.info("--> starting 3 nodes, 1 master, 2 data");
String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2); internalCluster().startDataOnlyNodes(2);
assertAcked( assertAcked(
client().admin() client().admin()
@ -305,7 +305,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
Set<String> historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) Set<String> historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards())
.map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)) .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY))
.collect(Collectors.toSet()); .collect(Collectors.toSet());
createStaleReplicaScenario(master); createStaleReplicaScenario(clusterManager);
if (randomBoolean()) { if (randomBoolean()) {
assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(0)); assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(0));
} }
@ -343,7 +343,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
} }
logger.info("expected allocation ids: {} actual allocation ids: {}", expectedAllocationIds, allocationIds); logger.info("expected allocation ids: {} actual allocation ids: {}", expectedAllocationIds, allocationIds);
}; };
final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master); final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, clusterManager);
clusterService.addListener(clusterStateListener); clusterService.addListener(clusterStateListener);
rerouteBuilder.get(); rerouteBuilder.get();
@ -390,7 +390,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
} }
public void testForceStaleReplicaToBePromotedToPrimaryOnWrongNode() throws Exception { public void testForceStaleReplicaToBePromotedToPrimaryOnWrongNode() throws Exception {
String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2); internalCluster().startDataOnlyNodes(2);
final String idxName = "test"; final String idxName = "test";
assertAcked( assertAcked(
@ -401,14 +401,14 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
.get() .get()
); );
ensureGreen(); ensureGreen();
createStaleReplicaScenario(master); createStaleReplicaScenario(clusterManager);
// Ensure the stopped primary's data is deleted so that it doesn't get picked up by the next datanode we start // Ensure the stopped primary's data is deleted so that it doesn't get picked up by the next datanode we start
internalCluster().wipePendingDataDirectories(); internalCluster().wipePendingDataDirectories();
internalCluster().startDataOnlyNodes(1); internalCluster().startDataOnlyNodes(1);
ensureStableCluster(3, master); ensureStableCluster(3, clusterManager);
final int shardId = 0; final int shardId = 0;
final List<String> nodeNames = new ArrayList<>(Arrays.asList(internalCluster().getNodeNames())); final List<String> nodeNames = new ArrayList<>(Arrays.asList(internalCluster().getNodeNames()));
nodeNames.remove(master); nodeNames.remove(clusterManager);
client().admin() client().admin()
.indices() .indices()
.prepareShardStores(idxName) .prepareShardStores(idxName)
@ -434,7 +434,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
} }
public void testForceStaleReplicaToBePromotedForGreenIndex() { public void testForceStaleReplicaToBePromotedForGreenIndex() {
internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2); final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final String idxName = "test"; final String idxName = "test";
assertAcked( assertAcked(
@ -459,7 +459,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
} }
public void testForceStaleReplicaToBePromotedForMissingIndex() { public void testForceStaleReplicaToBePromotedForMissingIndex() {
internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String idxName = "test"; final String idxName = "test";
IndexNotFoundException ex = expectThrows( IndexNotFoundException ex = expectThrows(
@ -497,7 +497,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
} }
public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception { public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception {
internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY);
assertAcked( assertAcked(
client().admin() client().admin()
@ -537,7 +537,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
} }
public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception {
internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY);
assertAcked( assertAcked(
client().admin() client().admin()
@ -657,7 +657,7 @@ public class PrimaryAllocationIT extends OpenSearchIntegTestCase {
* This test asserts that replicas failed to execute resync operations will be failed but not marked as stale. * This test asserts that replicas failed to execute resync operations will be failed but not marked as stale.
*/ */
public void testPrimaryReplicaResyncFailed() throws Exception { public void testPrimaryReplicaResyncFailed() throws Exception {
String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final int numberOfReplicas = between(2, 3); final int numberOfReplicas = between(2, 3);
final String oldPrimary = internalCluster().startDataOnlyNode(); final String oldPrimary = internalCluster().startDataOnlyNode();
assertAcked( assertAcked(

View File

@ -154,7 +154,7 @@ public class DiskThresholdDeciderIT extends OpenSearchIntegTestCase {
} }
public void testHighWatermarkNotExceeded() throws Exception { public void testHighWatermarkNotExceeded() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String dataNodeName = internalCluster().startDataOnlyNode(); final String dataNodeName = internalCluster().startDataOnlyNode();
ensureStableCluster(3); ensureStableCluster(3);
@ -189,7 +189,7 @@ public class DiskThresholdDeciderIT extends OpenSearchIntegTestCase {
} }
public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception { public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String dataNodeName = internalCluster().startDataOnlyNode(); final String dataNodeName = internalCluster().startDataOnlyNode();
ensureStableCluster(3); ensureStableCluster(3);

View File

@ -63,7 +63,7 @@ public class ClusterDisruptionCleanSettingsIT extends OpenSearchIntegTestCase {
public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception { public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception {
// Don't use AbstractDisruptionTestCase.DEFAULT_SETTINGS as settings // Don't use AbstractDisruptionTestCase.DEFAULT_SETTINGS as settings
// (which can cause node disconnects on a slow CI machine) // (which can cause node disconnects on a slow CI machine)
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String node_1 = internalCluster().startDataOnlyNode(); final String node_1 = internalCluster().startDataOnlyNode();
logger.info("--> creating index [test] with one shard and on replica"); logger.info("--> creating index [test] with one shard and on replica");

View File

@ -338,26 +338,26 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
// simulate handling of sending shard failure during an isolation // simulate handling of sending shard failure during an isolation
public void testSendingShardFailure() throws Exception { public void testSendingShardFailure() throws Exception {
List<String> nodes = startCluster(3); List<String> nodes = startCluster(3);
String masterNode = internalCluster().getMasterName(); String clusterManagerNode = internalCluster().getMasterName();
List<String> nonMasterNodes = nodes.stream().filter(node -> !node.equals(masterNode)).collect(Collectors.toList()); List<String> nonClusterManagerNodes = nodes.stream().filter(node -> !node.equals(clusterManagerNode)).collect(Collectors.toList());
String nonMasterNode = randomFrom(nonMasterNodes); String nonClusterManagerNode = randomFrom(nonClusterManagerNodes);
assertAcked( assertAcked(
prepareCreate("test").setSettings( prepareCreate("test").setSettings(
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)
) )
); );
ensureGreen(); ensureGreen();
String nonMasterNodeId = internalCluster().clusterService(nonMasterNode).localNode().getId(); String nonClusterManagerNodeId = internalCluster().clusterService(nonClusterManagerNode).localNode().getId();
// fail a random shard // fail a random shard
ShardRouting failedShard = randomFrom( ShardRouting failedShard = randomFrom(
clusterService().state().getRoutingNodes().node(nonMasterNodeId).shardsWithState(ShardRoutingState.STARTED) clusterService().state().getRoutingNodes().node(nonClusterManagerNodeId).shardsWithState(ShardRoutingState.STARTED)
); );
ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonMasterNode); ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonClusterManagerNode);
CountDownLatch latch = new CountDownLatch(1); CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean success = new AtomicBoolean(); AtomicBoolean success = new AtomicBoolean();
String isolatedNode = randomBoolean() ? masterNode : nonMasterNode; String isolatedNode = randomBoolean() ? clusterManagerNode : nonClusterManagerNode;
TwoPartitions partitions = isolateNode(isolatedNode); TwoPartitions partitions = isolateNode(isolatedNode);
// we cannot use the NetworkUnresponsive disruption type here as it will swallow the "shard failed" request, calling neither // we cannot use the NetworkUnresponsive disruption type here as it will swallow the "shard failed" request, calling neither
// onSuccess nor onFailure on the provided listener. // onSuccess nor onFailure on the provided listener.
@ -385,10 +385,10 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
} }
); );
if (isolatedNode.equals(nonMasterNode)) { if (isolatedNode.equals(nonClusterManagerNode)) {
assertNoMaster(nonMasterNode); assertNoMaster(nonClusterManagerNode);
} else { } else {
ensureStableCluster(2, nonMasterNode); ensureStableCluster(2, nonClusterManagerNode);
} }
// heal the partition // heal the partition
@ -410,10 +410,10 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
} }
public void testCannotJoinIfMasterLostDataFolder() throws Exception { public void testCannotJoinIfMasterLostDataFolder() throws Exception {
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String dataNode = internalCluster().startDataOnlyNode(); String dataNode = internalCluster().startDataOnlyNode();
internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { internalCluster().restartNode(clusterManagerNode, new InternalTestCluster.RestartCallback() {
@Override @Override
public boolean clearData(String nodeName) { public boolean clearData(String nodeName) {
return true; return true;
@ -442,9 +442,9 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
}); });
assertBusy(() -> { assertBusy(() -> {
assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut()); assertFalse(internalCluster().client(clusterManagerNode).admin().cluster().prepareHealth().get().isTimedOut());
assertTrue( assertTrue(
internalCluster().client(masterNode) internalCluster().client(clusterManagerNode)
.admin() .admin()
.cluster() .cluster()
.prepareHealth() .prepareHealth()

View File

@ -64,29 +64,29 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
* Test cluster join with issues in cluster state publishing * * Test cluster join with issues in cluster state publishing *
*/ */
public void testClusterJoinDespiteOfPublishingIssues() throws Exception { public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
String masterNode = internalCluster().startMasterOnlyNode(); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
String nonMasterNode = internalCluster().startDataOnlyNode(); String nonClusterManagerNode = internalCluster().startDataOnlyNode();
DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonMasterNode).state().nodes(); DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonClusterManagerNode).state().nodes();
TransportService masterTranspotService = internalCluster().getInstance( TransportService masterTranspotService = internalCluster().getInstance(
TransportService.class, TransportService.class,
discoveryNodes.getMasterNode().getName() discoveryNodes.getMasterNode().getName()
); );
logger.info("blocking requests from non master [{}] to master [{}]", nonMasterNode, masterNode); logger.info("blocking requests from non master [{}] to master [{}]", nonClusterManagerNode, clusterManagerNode);
MockTransportService nonMasterTransportService = (MockTransportService) internalCluster().getInstance( MockTransportService nonMasterTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class, TransportService.class,
nonMasterNode nonClusterManagerNode
); );
nonMasterTransportService.addFailToSendNoConnectRule(masterTranspotService); nonMasterTransportService.addFailToSendNoConnectRule(masterTranspotService);
assertNoMaster(nonMasterNode); assertNoMaster(nonClusterManagerNode);
logger.info("blocking cluster state publishing from master [{}] to non master [{}]", masterNode, nonMasterNode); logger.info("blocking cluster state publishing from master [{}] to non master [{}]", clusterManagerNode, nonClusterManagerNode);
MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance( MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class, TransportService.class,
masterNode clusterManagerNode
); );
TransportService localTransportService = internalCluster().getInstance( TransportService localTransportService = internalCluster().getInstance(
TransportService.class, TransportService.class,
@ -98,7 +98,11 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
masterTransportService.addFailToSendNoConnectRule(localTransportService, PublicationTransportHandler.COMMIT_STATE_ACTION_NAME); masterTransportService.addFailToSendNoConnectRule(localTransportService, PublicationTransportHandler.COMMIT_STATE_ACTION_NAME);
} }
logger.info("allowing requests from non master [{}] to master [{}], waiting for two join request", nonMasterNode, masterNode); logger.info(
"allowing requests from non master [{}] to master [{}], waiting for two join request",
nonClusterManagerNode,
clusterManagerNode
);
final CountDownLatch countDownLatch = new CountDownLatch(2); final CountDownLatch countDownLatch = new CountDownLatch(2);
nonMasterTransportService.addSendBehavior(masterTransportService, (connection, requestId, action, request, options) -> { nonMasterTransportService.addSendBehavior(masterTransportService, (connection, requestId, action, request, options) -> {
if (action.equals(JoinHelper.JOIN_ACTION_NAME)) { if (action.equals(JoinHelper.JOIN_ACTION_NAME)) {
@ -197,31 +201,31 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
public void testNodeNotReachableFromMaster() throws Exception { public void testNodeNotReachableFromMaster() throws Exception {
startCluster(3); startCluster(3);
String masterNode = internalCluster().getMasterName(); String clusterManagerNode = internalCluster().getMasterName();
String nonMasterNode = null; String nonClusterManagerNode = null;
while (nonMasterNode == null) { while (nonClusterManagerNode == null) {
nonMasterNode = randomFrom(internalCluster().getNodeNames()); nonClusterManagerNode = randomFrom(internalCluster().getNodeNames());
if (nonMasterNode.equals(masterNode)) { if (nonClusterManagerNode.equals(clusterManagerNode)) {
nonMasterNode = null; nonClusterManagerNode = null;
} }
} }
logger.info("blocking request from master [{}] to [{}]", masterNode, nonMasterNode); logger.info("blocking request from master [{}] to [{}]", clusterManagerNode, nonClusterManagerNode);
MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance( MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(
TransportService.class, TransportService.class,
masterNode clusterManagerNode
); );
if (randomBoolean()) { if (randomBoolean()) {
masterTransportService.addUnresponsiveRule(internalCluster().getInstance(TransportService.class, nonMasterNode)); masterTransportService.addUnresponsiveRule(internalCluster().getInstance(TransportService.class, nonClusterManagerNode));
} else { } else {
masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, nonMasterNode)); masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, nonClusterManagerNode));
} }
logger.info("waiting for [{}] to be removed from cluster", nonMasterNode); logger.info("waiting for [{}] to be removed from cluster", nonClusterManagerNode);
ensureStableCluster(2, masterNode); ensureStableCluster(2, clusterManagerNode);
logger.info("waiting for [{}] to have no cluster-manager", nonMasterNode); logger.info("waiting for [{}] to have no cluster-manager", nonClusterManagerNode);
assertNoMaster(nonMasterNode); assertNoMaster(nonClusterManagerNode);
logger.info("healing partition and checking cluster reforms"); logger.info("healing partition and checking cluster reforms");
masterTransportService.clearAllRules(); masterTransportService.clearAllRules();

View File

@ -71,33 +71,40 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {
public void testMasterNodeGCs() throws Exception { public void testMasterNodeGCs() throws Exception {
List<String> nodes = startCluster(3); List<String> nodes = startCluster(3);
String oldMasterNode = internalCluster().getMasterName(); String oldClusterManagerNode = internalCluster().getMasterName();
// a very long GC, but it's OK as we remove the disruption when it has had an effect // a very long GC, but it's OK as we remove the disruption when it has had an effect
SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(random(), oldMasterNode, 100, 200, 30000, 60000); SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(
random(),
oldClusterManagerNode,
100,
200,
30000,
60000
);
internalCluster().setDisruptionScheme(masterNodeDisruption); internalCluster().setDisruptionScheme(masterNodeDisruption);
masterNodeDisruption.startDisrupting(); masterNodeDisruption.startDisrupting();
Set<String> oldNonMasterNodesSet = new HashSet<>(nodes); Set<String> oldNonClusterManagerNodesSet = new HashSet<>(nodes);
oldNonMasterNodesSet.remove(oldMasterNode); oldNonClusterManagerNodesSet.remove(oldClusterManagerNode);
List<String> oldNonMasterNodes = new ArrayList<>(oldNonMasterNodesSet); List<String> oldNonClusterManagerNodes = new ArrayList<>(oldNonClusterManagerNodesSet);
logger.info("waiting for nodes to de-elect master [{}]", oldMasterNode); logger.info("waiting for nodes to de-elect master [{}]", oldClusterManagerNode);
for (String node : oldNonMasterNodesSet) { for (String node : oldNonClusterManagerNodesSet) {
assertDifferentMaster(node, oldMasterNode); assertDifferentMaster(node, oldClusterManagerNode);
} }
logger.info("waiting for nodes to elect a new master"); logger.info("waiting for nodes to elect a new master");
ensureStableCluster(2, oldNonMasterNodes.get(0)); ensureStableCluster(2, oldNonClusterManagerNodes.get(0));
// restore GC // restore GC
masterNodeDisruption.stopDisrupting(); masterNodeDisruption.stopDisrupting();
final TimeValue waitTime = new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()); final TimeValue waitTime = new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis());
ensureStableCluster(3, waitTime, false, oldNonMasterNodes.get(0)); ensureStableCluster(3, waitTime, false, oldNonClusterManagerNodes.get(0));
// make sure all nodes agree on master // make sure all nodes agree on master
String newMaster = internalCluster().getMasterName(); String newMaster = internalCluster().getMasterName();
assertThat(newMaster, not(equalTo(oldMasterNode))); assertThat(newMaster, not(equalTo(oldClusterManagerNode)));
assertMaster(newMaster, nodes); assertMaster(newMaster, nodes);
} }

View File

@ -73,8 +73,8 @@ import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
/** /**
* Tests relating to the loss of the master, but which work with the default fault detection settings which are rather lenient and will * Tests relating to the loss of the cluster-manager, but which work with the default fault detection settings which are rather lenient and will
* not detect a master failure too quickly. * not detect a cluster-manager failure too quickly.
*/ */
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class StableMasterDisruptionIT extends OpenSearchIntegTestCase { public class StableMasterDisruptionIT extends OpenSearchIntegTestCase {
@ -228,9 +228,9 @@ public class StableMasterDisruptionIT extends OpenSearchIntegTestCase {
event.state(), event.state(),
event.previousState() event.previousState()
); );
String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; String previousClusterManagerNodeName = previousMaster != null ? previousMaster.getName() : null;
String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); masters.get(node).add(new Tuple<>(previousClusterManagerNodeName, currentMasterNodeName));
} }
}); });
} }

View File

@ -56,7 +56,7 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
final String indexName = "test-repurpose"; final String indexName = "test-repurpose";
logger.info("--> starting two nodes"); logger.info("--> starting two nodes");
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode( final String dataNode = internalCluster().startDataOnlyNode(
Settings.builder().put(IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), false).build() Settings.builder().put(IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), false).build()
); );
@ -71,20 +71,20 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
assertTrue(client().prepareGet(indexName, "1").get().isExists()); assertTrue(client().prepareGet(indexName, "1").get().isExists());
final Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); final Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
final Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode); final Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
final Settings noMasterNoDataSettings = NodeRoles.removeRoles( final Settings noClusterManagerNoDataSettings = NodeRoles.removeRoles(
Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE))) Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE)))
); );
final Settings noMasterNoDataSettingsForMasterNode = Settings.builder() final Settings noClusterManagerNoDataSettingsForClusterManagerNode = Settings.builder()
.put(noMasterNoDataSettings) .put(noClusterManagerNoDataSettings)
.put(masterNodeDataPathSettings) .put(clusterManagerNodeDataPathSettings)
.build(); .build();
final Settings noMasterNoDataSettingsForDataNode = Settings.builder() final Settings noClusterManagerNoDataSettingsForDataNode = Settings.builder()
.put(noMasterNoDataSettings) .put(noClusterManagerNoDataSettings)
.put(dataNodeDataPathSettings) .put(dataNodeDataPathSettings)
.build(); .build();
@ -99,11 +99,11 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
); );
logger.info("--> Repurposing node 1"); logger.info("--> Repurposing node 1");
executeRepurposeCommand(noMasterNoDataSettingsForDataNode, 1, 1); executeRepurposeCommand(noClusterManagerNoDataSettingsForDataNode, 1, 1);
OpenSearchException lockedException = expectThrows( OpenSearchException lockedException = expectThrows(
OpenSearchException.class, OpenSearchException.class,
() -> executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, 1, 1) () -> executeRepurposeCommand(noClusterManagerNoDataSettingsForClusterManagerNode, 1, 1)
); );
assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG)); assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG));
@ -119,11 +119,11 @@ public class NodeRepurposeCommandIT extends OpenSearchIntegTestCase {
internalCluster().stopRandomNode(s -> true); internalCluster().stopRandomNode(s -> true);
internalCluster().stopRandomNode(s -> true); internalCluster().stopRandomNode(s -> true);
executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, 1, 0); executeRepurposeCommand(noClusterManagerNoDataSettingsForClusterManagerNode, 1, 0);
// by restarting as master and data node, we can check that the index definition was really deleted and also that the tool // by restarting as master and data node, we can check that the index definition was really deleted and also that the tool
// does not mess things up so much that the nodes cannot boot as master or data node any longer. // does not mess things up so much that the nodes cannot boot as master or data node any longer.
internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings);
internalCluster().startDataOnlyNode(dataNodeDataPathSettings); internalCluster().startDataOnlyNode(dataNodeDataPathSettings);
ensureGreen(); ensureGreen();

View File

@ -277,7 +277,7 @@ public class GatewayIndexStateIT extends OpenSearchIntegTestCase {
logger.info("--> cleaning nodes"); logger.info("--> cleaning nodes");
logger.info("--> starting 1 master node non data"); logger.info("--> starting 1 master node non data");
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
logger.info("--> create an index"); logger.info("--> create an index");

View File

@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.equalTo;
public class MetadataNodesIT extends OpenSearchIntegTestCase { public class MetadataNodesIT extends OpenSearchIntegTestCase {
public void testMetaWrittenAlsoOnDataNode() throws Exception { public void testMetaWrittenAlsoOnDataNode() throws Exception {
// this test checks that index state is written on data only nodes if they have a shard allocated // this test checks that index state is written on data only nodes if they have a shard allocated
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY); String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY); String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_replicas", 0))); assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)));
index("test", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); index("test", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
@ -71,7 +71,7 @@ public class MetadataNodesIT extends OpenSearchIntegTestCase {
public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Exception { public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Exception {
// this test checks that the index data is removed from a data only node once all shards have been allocated away from it // this test checks that the index data is removed from a data only node once all shards have been allocated away from it
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY); String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
List<String> nodeNames = internalCluster().startDataOnlyNodes(2); List<String> nodeNames = internalCluster().startDataOnlyNodes(2);
String node1 = nodeNames.get(0); String node1 = nodeNames.get(0);
String node2 = nodeNames.get(1); String node2 = nodeNames.get(1);
@ -114,7 +114,7 @@ public class MetadataNodesIT extends OpenSearchIntegTestCase {
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY); String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY); final String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
final String index = "index"; final String index = "index";

View File

@ -44,7 +44,7 @@ import org.opensearch.test.OpenSearchIntegTestCase.Scope;
import java.util.Set; import java.util.Set;
import static org.opensearch.test.NodeRoles.dataOnlyNode; import static org.opensearch.test.NodeRoles.dataOnlyNode;
import static org.opensearch.test.NodeRoles.masterOnlyNode; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasItem;
@ -75,7 +75,7 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
} }
public void testRecoverAfterNodes() throws Exception { public void testRecoverAfterNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start node (1)"); logger.info("--> start node (1)");
Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3)); Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3));
assertThat( assertThat(
@ -128,9 +128,9 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
} }
public void testRecoverAfterMasterNodes() throws Exception { public void testRecoverAfterMasterNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start master_node (1)"); logger.info("--> start master_node (1)");
Client master1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(masterOnlyNode())); Client master1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode()));
assertThat( assertThat(
master1.admin() master1.admin()
.cluster() .cluster()
@ -211,7 +211,7 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
); );
logger.info("--> start master_node (2)"); logger.info("--> start master_node (2)");
Client master2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(masterOnlyNode())); Client master2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode()));
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
@ -219,9 +219,9 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
} }
public void testRecoverAfterDataNodes() throws Exception { public void testRecoverAfterDataNodes() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
logger.info("--> start master_node (1)"); logger.info("--> start master_node (1)");
Client master1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(masterOnlyNode())); Client master1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode()));
assertThat( assertThat(
master1.admin() master1.admin()
.cluster() .cluster()
@ -263,7 +263,7 @@ public class RecoverAfterNodesIT extends OpenSearchIntegTestCase {
); );
logger.info("--> start master_node (2)"); logger.info("--> start master_node (2)");
Client master2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(masterOnlyNode())); Client master2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode()));
assertThat( assertThat(
master2.admin() master2.admin()
.cluster() .cluster()

View File

@ -512,7 +512,7 @@ public class RecoveryFromGatewayIT extends OpenSearchIntegTestCase {
} }
public void testReuseInFileBasedPeerRecovery() throws Exception { public void testReuseInFileBasedPeerRecovery() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String primaryNode = internalCluster().startDataOnlyNode(nodeSettings(0)); final String primaryNode = internalCluster().startDataOnlyNode(nodeSettings(0));
// create the index with our mapping // create the index with our mapping

View File

@ -327,7 +327,7 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
public void testPreferCopyWithHighestMatchingOperations() throws Exception { public void testPreferCopyWithHighestMatchingOperations() throws Exception {
String indexName = "test"; String indexName = "test";
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(3); internalCluster().startDataOnlyNodes(3);
assertAcked( assertAcked(
client().admin() client().admin()
@ -402,7 +402,7 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
* Make sure that we do not repeatedly cancel an ongoing recovery for a noop copy on a broken node. * Make sure that we do not repeatedly cancel an ongoing recovery for a noop copy on a broken node.
*/ */
public void testDoNotCancelRecoveryForBrokenNode() throws Exception { public void testDoNotCancelRecoveryForBrokenNode() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
String nodeWithPrimary = internalCluster().startDataOnlyNode(); String nodeWithPrimary = internalCluster().startDataOnlyNode();
String indexName = "test"; String indexName = "test";
assertAcked( assertAcked(
@ -518,7 +518,7 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
* this behavior by changing the global checkpoint in phase1 to unassigned. * this behavior by changing the global checkpoint in phase1 to unassigned.
*/ */
public void testSimulateRecoverySourceOnOldNode() throws Exception { public void testSimulateRecoverySourceOnOldNode() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
String source = internalCluster().startDataOnlyNode(); String source = internalCluster().startDataOnlyNode();
String indexName = "test"; String indexName = "test";
assertAcked( assertAcked(

View File

@ -63,7 +63,7 @@ public class PeerRecoveryRetentionLeaseCreationIT extends OpenSearchIntegTestCas
* primary that is recovering from store creates a lease for itself. * primary that is recovering from store creates a lease for itself.
*/ */
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final Path[] nodeDataPaths = internalCluster().getInstance(NodeEnvironment.class, dataNode).nodeDataPaths(); final Path[] nodeDataPaths = internalCluster().getInstance(NodeEnvironment.class, dataNode).nodeDataPaths();

View File

@ -497,7 +497,7 @@ public class RemoveCorruptedShardDataCommandIT extends OpenSearchIntegTestCase {
} }
public void testCorruptTranslogTruncationOfReplica() throws Exception { public void testCorruptTranslogTruncationOfReplica() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String node1 = internalCluster().startDataOnlyNode(); final String node1 = internalCluster().startDataOnlyNode();
final String node2 = internalCluster().startDataOnlyNode(); final String node2 = internalCluster().startDataOnlyNode();

View File

@ -42,7 +42,7 @@ public class DedicatedMasterGetFieldMappingIT extends SimpleGetFieldMappingsIT {
@Before @Before
public void before1() throws Exception { public void before1() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startNode(); internalCluster().startNode();
} }

View File

@ -1212,7 +1212,7 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
.build(); .build();
TimeValue disconnectAfterDelay = TimeValue.timeValueMillis(randomIntBetween(0, 100)); TimeValue disconnectAfterDelay = TimeValue.timeValueMillis(randomIntBetween(0, 100));
// start a master node // start a master node
String masterNodeName = internalCluster().startMasterOnlyNode(nodeSettings); String masterNodeName = internalCluster().startClusterManagerOnlyNode(nodeSettings);
final String blueNodeName = internalCluster().startNode( final String blueNodeName = internalCluster().startNode(
Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build() Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build()
@ -2085,7 +2085,7 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
} }
public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception { public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception {
internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2); final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final Settings randomNodeDataPathSettings = internalCluster().dataPathSettings(randomFrom(dataNodes)); final Settings randomNodeDataPathSettings = internalCluster().dataPathSettings(randomFrom(dataNodes));
final String indexName = "test"; final String indexName = "test";
@ -2185,7 +2185,7 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
} }
public void testCancelRecoveryWithAutoExpandReplicas() throws Exception { public void testCancelRecoveryWithAutoExpandReplicas() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
assertAcked( assertAcked(
client().admin() client().admin()
.indices() .indices()

View File

@ -345,7 +345,7 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
} }
public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final List<String> nodes = internalCluster().startDataOnlyNodes(4); final List<String> nodes = internalCluster().startDataOnlyNodes(4);
final String node1 = nodes.get(0); final String node1 = nodes.get(0);
@ -459,11 +459,11 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
public void testShardActiveElseWhere() throws Exception { public void testShardActiveElseWhere() throws Exception {
List<String> nodes = internalCluster().startNodes(2); List<String> nodes = internalCluster().startNodes(2);
final String masterNode = internalCluster().getMasterName(); final String clusterManagerNode = internalCluster().getMasterName();
final String nonMasterNode = nodes.get(0).equals(masterNode) ? nodes.get(1) : nodes.get(0); final String nonClusterManagerNode = nodes.get(0).equals(clusterManagerNode) ? nodes.get(1) : nodes.get(0);
final String masterId = internalCluster().clusterService(masterNode).localNode().getId(); final String clusterManagerId = internalCluster().clusterService(clusterManagerNode).localNode().getId();
final String nonMasterId = internalCluster().clusterService(nonMasterNode).localNode().getId(); final String nonClusterManagerId = internalCluster().clusterService(nonClusterManagerNode).localNode().getId();
final int numShards = scaledRandomIntBetween(2, 10); final int numShards = scaledRandomIntBetween(2, 10);
assertAcked( assertAcked(
@ -476,14 +476,14 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
waitNoPendingTasksOnAll(); waitNoPendingTasksOnAll();
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
final Index index = stateResponse.getState().metadata().index("test").getIndex(); final Index index = stateResponse.getState().metadata().index("test").getIndex();
RoutingNode routingNode = stateResponse.getState().getRoutingNodes().node(nonMasterId); RoutingNode routingNode = stateResponse.getState().getRoutingNodes().node(nonClusterManagerId);
final int[] node2Shards = new int[routingNode.numberOfOwningShards()]; final int[] node2Shards = new int[routingNode.numberOfOwningShards()];
int i = 0; int i = 0;
for (ShardRouting shardRouting : routingNode) { for (ShardRouting shardRouting : routingNode) {
node2Shards[i] = shardRouting.shardId().id(); node2Shards[i] = shardRouting.shardId().id();
i++; i++;
} }
logger.info("Node [{}] has shards: {}", nonMasterNode, Arrays.toString(node2Shards)); logger.info("Node [{}] has shards: {}", nonClusterManagerNode, Arrays.toString(node2Shards));
// disable relocations when we do this, to make sure the shards are not relocated from node2 // disable relocations when we do this, to make sure the shards are not relocated from node2
// due to rebalancing, and delete its content // due to rebalancing, and delete its content
@ -496,14 +496,14 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
) )
.get(); .get();
ClusterApplierService clusterApplierService = internalCluster().getInstance(ClusterService.class, nonMasterNode) ClusterApplierService clusterApplierService = internalCluster().getInstance(ClusterService.class, nonClusterManagerNode)
.getClusterApplierService(); .getClusterApplierService();
ClusterState currentState = clusterApplierService.state(); ClusterState currentState = clusterApplierService.state();
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index);
for (int j = 0; j < numShards; j++) { for (int j = 0; j < numShards; j++) {
indexRoutingTableBuilder.addIndexShard( indexRoutingTableBuilder.addIndexShard(
new IndexShardRoutingTable.Builder(new ShardId(index, j)).addShard( new IndexShardRoutingTable.Builder(new ShardId(index, j)).addShard(
TestShardRouting.newShardRouting("test", j, masterId, true, ShardRoutingState.STARTED) TestShardRouting.newShardRouting("test", j, clusterManagerId, true, ShardRoutingState.STARTED)
).build() ).build()
); );
} }
@ -528,7 +528,7 @@ public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase {
waitNoPendingTasksOnAll(); waitNoPendingTasksOnAll();
logger.info("Checking if shards aren't removed"); logger.info("Checking if shards aren't removed");
for (int shard : node2Shards) { for (int shard : node2Shards) {
assertShardExists(nonMasterNode, index, shard); assertShardExists(nonClusterManagerNode, index, shard);
} }
} }

View File

@ -302,8 +302,8 @@ public class IngestClientIT extends OpenSearchIntegTestCase {
assertFalse(response.isFound()); assertFalse(response.isFound());
} }
public void testWithDedicatedMaster() throws Exception { public void testWithDedicatedClusterManager() throws Exception {
String masterOnlyNode = internalCluster().startMasterOnlyNode(); String clusterManagerOnlyNode = internalCluster().startClusterManagerOnlyNode();
BytesReference source = BytesReference.bytes( BytesReference source = BytesReference.bytes(
jsonBuilder().startObject() jsonBuilder().startObject()
.field("description", "my_pipeline") .field("description", "my_pipeline")
@ -318,7 +318,7 @@ public class IngestClientIT extends OpenSearchIntegTestCase {
PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON);
client().admin().cluster().putPipeline(putPipelineRequest).get(); client().admin().cluster().putPipeline(putPipelineRequest).get();
BulkItemResponse item = client(masterOnlyNode).prepareBulk() BulkItemResponse item = client(clusterManagerOnlyNode).prepareBulk()
.add(client().prepareIndex("test").setSource("field", "value2", "drop", true).setPipeline("_id")) .add(client().prepareIndex("test").setSource("field", "value2", "drop", true).setPipeline("_id"))
.get() .get()
.getItems()[0]; .getItems()[0];

View File

@ -196,7 +196,7 @@ public class FullRollingRestartIT extends OpenSearchIntegTestCase {
public void testNoRebalanceOnRollingRestart() throws Exception { public void testNoRebalanceOnRollingRestart() throws Exception {
// see https://github.com/elastic/elasticsearch/issues/14387 // see https://github.com/elastic/elasticsearch/issues/14387
internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(3); internalCluster().startDataOnlyNodes(3);
/** /**
* We start 3 nodes and a dedicated master. Restart on of the data-nodes and ensure that we got no relocations. * We start 3 nodes and a dedicated master. Restart on of the data-nodes and ensure that we got no relocations.

View File

@ -59,7 +59,7 @@ import static org.hamcrest.Matchers.is;
public class AbortedRestoreIT extends AbstractSnapshotIntegTestCase { public class AbortedRestoreIT extends AbstractSnapshotIntegTestCase {
public void testAbortedRestoreAlsoAbortFileRestores() throws Exception { public void testAbortedRestoreAlsoAbortFileRestores() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String indexName = "test-abort-restore"; final String indexName = "test-abort-restore";

View File

@ -59,7 +59,7 @@ import static org.hamcrest.Matchers.is;
public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase { public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
public void testIncrementalBehaviorOnPrimaryFailover() throws InterruptedException, ExecutionException, IOException { public void testIncrementalBehaviorOnPrimaryFailover() throws InterruptedException, ExecutionException, IOException {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String primaryNode = internalCluster().startDataOnlyNode(); final String primaryNode = internalCluster().startDataOnlyNode();
final String indexName = "test-index"; final String indexName = "test-index";
createIndex( createIndex(
@ -148,7 +148,7 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
} }
public void testForceMergeCausesFullSnapshot() throws Exception { public void testForceMergeCausesFullSnapshot() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().ensureAtLeastNumDataNodes(2); internalCluster().ensureAtLeastNumDataNodes(2);
final String indexName = "test-index"; final String indexName = "test-index";
createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build());

View File

@ -72,7 +72,7 @@ import static org.hamcrest.Matchers.is;
public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase { public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
public void testShardClone() throws Exception { public void testShardClone() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "repo-name"; final String repoName = "repo-name";
final Path repoPath = randomRepoPath(); final Path repoPath = randomRepoPath();
@ -143,7 +143,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testCloneSnapshotIndex() throws Exception { public void testCloneSnapshotIndex() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "repo-name"; final String repoName = "repo-name";
createRepository(repoName, "fs"); createRepository(repoName, "fs");
@ -172,7 +172,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testClonePreventsSnapshotDelete() throws Exception { public void testClonePreventsSnapshotDelete() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "repo-name"; final String repoName = "repo-name";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -185,9 +185,9 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
indexRandomDocs(indexName, randomIntBetween(20, 100)); indexRandomDocs(indexName, randomIntBetween(20, 100));
final String targetSnapshot = "target-snapshot"; final String targetSnapshot = "target-snapshot";
blockNodeOnAnyFiles(repoName, masterName); blockNodeOnAnyFiles(repoName, clusterManagerName);
final ActionFuture<AcknowledgedResponse> cloneFuture = startClone(repoName, sourceSnapshot, targetSnapshot, indexName); final ActionFuture<AcknowledgedResponse> cloneFuture = startClone(repoName, sourceSnapshot, targetSnapshot, indexName);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
assertFalse(cloneFuture.isDone()); assertFalse(cloneFuture.isDone());
ConcurrentSnapshotExecutionException ex = expectThrows( ConcurrentSnapshotExecutionException ex = expectThrows(
@ -196,7 +196,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
); );
assertThat(ex.getMessage(), containsString("cannot delete snapshot while it is being cloned")); assertThat(ex.getMessage(), containsString("cannot delete snapshot while it is being cloned"));
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
assertAcked(cloneFuture.get()); assertAcked(cloneFuture.get());
final List<SnapshotStatus> status = clusterAdmin().prepareSnapshotStatus(repoName) final List<SnapshotStatus> status = clusterAdmin().prepareSnapshotStatus(repoName)
.setSnapshots(sourceSnapshot, targetSnapshot) .setSnapshots(sourceSnapshot, targetSnapshot)
@ -210,7 +210,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testConcurrentCloneAndSnapshot() throws Exception { public void testConcurrentCloneAndSnapshot() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "repo-name"; final String repoName = "repo-name";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -234,7 +234,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
public void testLongRunningCloneAllowsConcurrentSnapshot() throws Exception { public void testLongRunningCloneAllowsConcurrentSnapshot() throws Exception {
// large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations
final String masterNode = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -263,7 +263,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testLongRunningSnapshotAllowsConcurrentClone() throws Exception { public void testLongRunningSnapshotAllowsConcurrentClone() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -293,7 +293,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testDeletePreventsClone() throws Exception { public void testDeletePreventsClone() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "repo-name"; final String repoName = "repo-name";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -306,9 +306,9 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
indexRandomDocs(indexName, randomIntBetween(20, 100)); indexRandomDocs(indexName, randomIntBetween(20, 100));
final String targetSnapshot = "target-snapshot"; final String targetSnapshot = "target-snapshot";
blockNodeOnAnyFiles(repoName, masterName); blockNodeOnAnyFiles(repoName, clusterManagerName);
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, sourceSnapshot); final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, sourceSnapshot);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
assertFalse(deleteFuture.isDone()); assertFalse(deleteFuture.isDone());
ConcurrentSnapshotExecutionException ex = expectThrows( ConcurrentSnapshotExecutionException ex = expectThrows(
@ -317,13 +317,13 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
); );
assertThat(ex.getMessage(), containsString("cannot clone from snapshot that is being deleted")); assertThat(ex.getMessage(), containsString("cannot clone from snapshot that is being deleted"));
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
assertAcked(deleteFuture.get()); assertAcked(deleteFuture.get());
} }
public void testBackToBackClonesForIndexNotInCluster() throws Exception { public void testBackToBackClonesForIndexNotInCluster() throws Exception {
// large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations
final String masterNode = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -413,7 +413,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testFailsOnCloneMissingIndices() { public void testFailsOnCloneMissingIndices() {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "repo-name"; final String repoName = "repo-name";
final Path repoPath = randomRepoPath(); final Path repoPath = randomRepoPath();
@ -481,7 +481,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -516,7 +516,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throws Exception { public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); final String masterName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -546,7 +546,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws Exception { public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -560,14 +560,14 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
blockMasterOnWriteIndexFile(repoName); blockMasterOnWriteIndexFile(repoName);
final String cloneName = "clone-blocked"; final String cloneName = "clone-blocked";
final ActionFuture<AcknowledgedResponse> blockedClone = startClone(repoName, sourceSnapshot, cloneName, indexName); final ActionFuture<AcknowledgedResponse> blockedClone = startClone(repoName, sourceSnapshot, cloneName, indexName);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
awaitNumberOfSnapshotsInProgress(1); awaitNumberOfSnapshotsInProgress(1);
final String otherCloneName = "other-clone"; final String otherCloneName = "other-clone";
final ActionFuture<AcknowledgedResponse> otherClone = startClone(repoName, sourceSnapshot, otherCloneName, indexName); final ActionFuture<AcknowledgedResponse> otherClone = startClone(repoName, sourceSnapshot, otherCloneName, indexName);
awaitNumberOfSnapshotsInProgress(2); awaitNumberOfSnapshotsInProgress(2);
assertFalse(blockedClone.isDone()); assertFalse(blockedClone.isDone());
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
awaitNoMoreRunningOperations(masterName); awaitNoMoreRunningOperations(clusterManagerName);
awaitMasterFinishRepoOperations(); awaitMasterFinishRepoOperations();
assertAcked(blockedClone.get()); assertAcked(blockedClone.get());
assertAcked(otherClone.get()); assertAcked(otherClone.get());
@ -576,7 +576,7 @@ public class CloneSnapshotIT extends AbstractSnapshotIntegTestCase {
} }
public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throws Exception { public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); final String masterName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");

View File

@ -106,7 +106,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testLongRunningSnapshotAllowsConcurrentSnapshot() throws Exception { public void testLongRunningSnapshotAllowsConcurrentSnapshot() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -139,7 +139,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testDeletesAreBatched() throws Exception { public void testDeletesAreBatched() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -205,7 +205,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception { public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String blockedRepoName = "test-repo-blocked"; final String blockedRepoName = "test-repo-blocked";
final String otherRepoName = "test-repo"; final String otherRepoName = "test-repo";
@ -231,7 +231,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testMultipleReposAreIndependent() throws Exception { public void testMultipleReposAreIndependent() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
// We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads // We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second concurrent snapshot. // left for the second concurrent snapshot.
final String dataNode = startDataNodeWithLargeSnapshotPool(); final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -255,7 +255,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testMultipleReposAreIndependent2() throws Exception { public void testMultipleReposAreIndependent2() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
// We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads // We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second repository's concurrent operations. // left for the second repository's concurrent operations.
final String dataNode = startDataNodeWithLargeSnapshotPool(); final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -280,7 +280,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testMultipleReposAreIndependent3() throws Exception { public void testMultipleReposAreIndependent3() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String blockedRepoName = "test-repo-blocked"; final String blockedRepoName = "test-repo-blocked";
final String otherRepoName = "test-repo"; final String otherRepoName = "test-repo";
@ -301,7 +301,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testSnapshotRunsAfterInProgressDelete() throws Exception { public void testSnapshotRunsAfterInProgressDelete() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -314,11 +314,11 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
blockMasterFromFinalizingSnapshotOnIndexFile(repoName); blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, firstSnapshot); final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, firstSnapshot);
waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, "second-snapshot"); final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, "second-snapshot");
unblockNode(repoName, masterNode); unblockNode(repoName, clusterManagerNode);
final UncategorizedExecutionException ex = expectThrows(UncategorizedExecutionException.class, deleteFuture::actionGet); final UncategorizedExecutionException ex = expectThrows(UncategorizedExecutionException.class, deleteFuture::actionGet);
assertThat(ex.getRootCause(), instanceOf(IOException.class)); assertThat(ex.getRootCause(), instanceOf(IOException.class));
@ -326,7 +326,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testAbortOneOfMultipleSnapshots() throws Exception { public void testAbortOneOfMultipleSnapshots() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -386,7 +386,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testCascadedAborts() throws Exception { public void testCascadedAborts() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -540,7 +540,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception { public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -571,7 +571,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testQueuedDeletesWithFailures() throws Exception { public void testQueuedDeletesWithFailures() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -580,7 +580,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
blockMasterFromFinalizingSnapshotOnIndexFile(repoName); blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
final ActionFuture<AcknowledgedResponse> firstDeleteFuture = startDeleteSnapshot(repoName, "*"); final ActionFuture<AcknowledgedResponse> firstDeleteFuture = startDeleteSnapshot(repoName, "*");
waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, "snapshot-queued"); final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, "snapshot-queued");
awaitNumberOfSnapshotsInProgress(1); awaitNumberOfSnapshotsInProgress(1);
@ -588,7 +588,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<AcknowledgedResponse> secondDeleteFuture = startDeleteSnapshot(repoName, "*"); final ActionFuture<AcknowledgedResponse> secondDeleteFuture = startDeleteSnapshot(repoName, "*");
awaitNDeletionsInProgress(2); awaitNDeletionsInProgress(2);
unblockNode(repoName, masterNode); unblockNode(repoName, clusterManagerNode);
expectThrows(UncategorizedExecutionException.class, firstDeleteFuture::actionGet); expectThrows(UncategorizedExecutionException.class, firstDeleteFuture::actionGet);
// Second delete works out cleanly since the repo is unblocked now // Second delete works out cleanly since the repo is unblocked now
@ -601,7 +601,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testQueuedDeletesWithOverlap() throws Exception { public void testQueuedDeletesWithOverlap() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -615,7 +615,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<AcknowledgedResponse> secondDeleteFuture = startDeleteSnapshot(repoName, "*"); final ActionFuture<AcknowledgedResponse> secondDeleteFuture = startDeleteSnapshot(repoName, "*");
awaitNDeletionsInProgress(2); awaitNDeletionsInProgress(2);
unblockNode(repoName, masterNode); unblockNode(repoName, clusterManagerNode);
assertThat(firstDeleteFuture.get().isAcknowledged(), is(true)); assertThat(firstDeleteFuture.get().isAcknowledged(), is(true));
// Second delete works out cleanly since the repo is unblocked now // Second delete works out cleanly since the repo is unblocked now
@ -878,7 +878,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testMultipleSnapshotsQueuedAfterDelete() throws Exception { public void testMultipleSnapshotsQueuedAfterDelete() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -889,7 +889,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<CreateSnapshotResponse> snapshotThree = startFullSnapshot(repoName, "snapshot-three"); final ActionFuture<CreateSnapshotResponse> snapshotThree = startFullSnapshot(repoName, "snapshot-three");
final ActionFuture<CreateSnapshotResponse> snapshotFour = startFullSnapshot(repoName, "snapshot-four"); final ActionFuture<CreateSnapshotResponse> snapshotFour = startFullSnapshot(repoName, "snapshot-four");
unblockNode(repoName, masterNode); unblockNode(repoName, clusterManagerNode);
assertSuccessful(snapshotThree); assertSuccessful(snapshotThree);
assertSuccessful(snapshotFour); assertSuccessful(snapshotFour);
@ -897,7 +897,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testMultiplePartialSnapshotsQueuedAfterDelete() throws Exception { public void testMultiplePartialSnapshotsQueuedAfterDelete() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -911,7 +911,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
awaitNumberOfSnapshotsInProgress(2); awaitNumberOfSnapshotsInProgress(2);
assertAcked(client().admin().indices().prepareDelete("index-two")); assertAcked(client().admin().indices().prepareDelete("index-two"));
unblockNode(repoName, masterNode); unblockNode(repoName, clusterManagerNode);
assertThat(snapshotThree.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); assertThat(snapshotThree.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL));
assertThat(snapshotFour.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); assertThat(snapshotFour.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL));
@ -919,7 +919,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testQueuedSnapshotsWaitingForShardReady() throws Exception { public void testQueuedSnapshotsWaitingForShardReady() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(2); internalCluster().startDataOnlyNodes(2);
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "fs"); createRepository(repoName, "fs");
@ -977,7 +977,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testBackToBackQueuedDeletes() throws Exception { public void testBackToBackQueuedDeletes() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -990,7 +990,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final ActionFuture<AcknowledgedResponse> deleteSnapshotTwo = startDeleteSnapshot(repoName, snapshotTwo); final ActionFuture<AcknowledgedResponse> deleteSnapshotTwo = startDeleteSnapshot(repoName, snapshotTwo);
awaitNDeletionsInProgress(2); awaitNDeletionsInProgress(2);
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
assertAcked(deleteSnapshotOne.get()); assertAcked(deleteSnapshotOne.get());
assertAcked(deleteSnapshotTwo.get()); assertAcked(deleteSnapshotTwo.get());
@ -1024,7 +1024,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testStartDeleteDuringFinalizationCleanup() throws Exception { public void testStartDeleteDuringFinalizationCleanup() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -1033,35 +1033,35 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
final String snapshotName = "snap-name"; final String snapshotName = "snap-name";
blockMasterFromDeletingIndexNFile(repoName); blockMasterFromDeletingIndexNFile(repoName);
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, snapshotName); final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, snapshotName);
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, snapshotName); final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, snapshotName);
awaitNDeletionsInProgress(1); awaitNDeletionsInProgress(1);
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
assertSuccessful(snapshotFuture); assertSuccessful(snapshotFuture);
assertAcked(deleteFuture.get(30L, TimeUnit.SECONDS)); assertAcked(deleteFuture.get(30L, TimeUnit.SECONDS));
} }
public void testEquivalentDeletesAreDeduplicated() throws Exception { public void testEquivalentDeletesAreDeduplicated() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
createIndexWithContent("index-test"); createIndexWithContent("index-test");
createNSnapshots(repoName, randomIntBetween(1, 5)); createNSnapshots(repoName, randomIntBetween(1, 5));
blockNodeOnAnyFiles(repoName, masterName); blockNodeOnAnyFiles(repoName, clusterManagerName);
final int deletes = randomIntBetween(2, 10); final int deletes = randomIntBetween(2, 10);
final List<ActionFuture<AcknowledgedResponse>> deleteResponses = new ArrayList<>(deletes); final List<ActionFuture<AcknowledgedResponse>> deleteResponses = new ArrayList<>(deletes);
for (int i = 0; i < deletes; ++i) { for (int i = 0; i < deletes; ++i) {
deleteResponses.add(client().admin().cluster().prepareDeleteSnapshot(repoName, "*").execute()); deleteResponses.add(client().admin().cluster().prepareDeleteSnapshot(repoName, "*").execute());
} }
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
awaitNDeletionsInProgress(1); awaitNDeletionsInProgress(1);
for (ActionFuture<AcknowledgedResponse> deleteResponse : deleteResponses) { for (ActionFuture<AcknowledgedResponse> deleteResponse : deleteResponses) {
assertFalse(deleteResponse.isDone()); assertFalse(deleteResponse.isDone());
} }
awaitNDeletionsInProgress(1); awaitNDeletionsInProgress(1);
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
for (ActionFuture<AcknowledgedResponse> deleteResponse : deleteResponses) { for (ActionFuture<AcknowledgedResponse> deleteResponse : deleteResponses) {
assertAcked(deleteResponse.get()); assertAcked(deleteResponse.get());
} }
@ -1102,7 +1102,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testStatusMultipleSnapshotsMultipleRepos() throws Exception { public void testStatusMultipleSnapshotsMultipleRepos() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
// We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads // We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second concurrent snapshot. // left for the second concurrent snapshot.
final String dataNode = startDataNodeWithLargeSnapshotPool(); final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -1146,7 +1146,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testInterleavedAcrossMultipleRepos() throws Exception { public void testInterleavedAcrossMultipleRepos() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
// We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads // We're blocking a some of the snapshot threads when we block the first repo below so we have to make sure we have enough threads
// left for the second concurrent snapshot. // left for the second concurrent snapshot.
final String dataNode = startDataNodeWithLargeSnapshotPool(); final String dataNode = startDataNodeWithLargeSnapshotPool();
@ -1219,7 +1219,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testConcurrentOperationsLimit() throws Exception { public void testConcurrentOperationsLimit() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -1237,7 +1237,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
); );
final List<String> snapshotNames = createNSnapshots(repoName, limitToTest + 1); final List<String> snapshotNames = createNSnapshots(repoName, limitToTest + 1);
blockNodeOnAnyFiles(repoName, masterName); blockNodeOnAnyFiles(repoName, clusterManagerName);
int blockedSnapshots = 0; int blockedSnapshots = 0;
boolean blockedDelete = false; boolean blockedDelete = false;
final List<ActionFuture<CreateSnapshotResponse>> snapshotFutures = new ArrayList<>(); final List<ActionFuture<CreateSnapshotResponse>> snapshotFutures = new ArrayList<>();
@ -1255,7 +1255,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
if (blockedDelete) { if (blockedDelete) {
awaitNDeletionsInProgress(1); awaitNDeletionsInProgress(1);
} }
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
final String expectedFailureMessage = "Cannot start another operation, already running [" final String expectedFailureMessage = "Cannot start another operation, already running ["
+ limitToTest + limitToTest
@ -1275,7 +1275,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
assertThat(csen2.getMessage(), containsString(expectedFailureMessage)); assertThat(csen2.getMessage(), containsString(expectedFailureMessage));
} }
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
if (deleteFuture != null) { if (deleteFuture != null) {
assertAcked(deleteFuture.get()); assertAcked(deleteFuture.get());
} }
@ -1285,7 +1285,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testConcurrentSnapshotWorksWithOldVersionRepo() throws Exception { public void testConcurrentSnapshotWorksWithOldVersionRepo() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
final Path repoPath = randomRepoPath(); final Path repoPath = randomRepoPath();
@ -1322,23 +1322,23 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testQueuedDeleteAfterFinalizationFailure() throws Exception { public void testQueuedDeleteAfterFinalizationFailure() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
blockMasterFromFinalizingSnapshotOnIndexFile(repoName); blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
final String snapshotName = "snap-1"; final String snapshotName = "snap-1";
final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, snapshotName); final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, snapshotName);
waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L));
final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, snapshotName); final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, snapshotName);
awaitNDeletionsInProgress(1); awaitNDeletionsInProgress(1);
unblockNode(repoName, masterNode); unblockNode(repoName, clusterManagerNode);
assertAcked(deleteFuture.get()); assertAcked(deleteFuture.get());
final SnapshotException sne = expectThrows(SnapshotException.class, snapshotFuture::actionGet); final SnapshotException sne = expectThrows(SnapshotException.class, snapshotFuture::actionGet);
assertThat(sne.getCause().getMessage(), containsString("exception after block")); assertThat(sne.getCause().getMessage(), containsString("exception after block"));
} }
public void testAbortNotStartedSnapshotWithoutIO() throws Exception { public void testAbortNotStartedSnapshotWithoutIO() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -1365,7 +1365,7 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
} }
public void testStartWithSuccessfulShardSnapshotPendingFinalization() throws Exception { public void testStartWithSuccessfulShardSnapshotPendingFinalization() throws Exception {
final String masterName = internalCluster().startMasterOnlyNode(); final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "mock"); createRepository(repoName, "mock");
@ -1375,13 +1375,13 @@ public class ConcurrentSnapshotsIT extends AbstractSnapshotIntegTestCase {
blockMasterOnWriteIndexFile(repoName); blockMasterOnWriteIndexFile(repoName);
final ActionFuture<CreateSnapshotResponse> blockedSnapshot = startFullSnapshot(repoName, "snap-blocked"); final ActionFuture<CreateSnapshotResponse> blockedSnapshot = startFullSnapshot(repoName, "snap-blocked");
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
awaitNumberOfSnapshotsInProgress(1); awaitNumberOfSnapshotsInProgress(1);
blockNodeOnAnyFiles(repoName, dataNode); blockNodeOnAnyFiles(repoName, dataNode);
final ActionFuture<CreateSnapshotResponse> otherSnapshot = startFullSnapshot(repoName, "other-snapshot"); final ActionFuture<CreateSnapshotResponse> otherSnapshot = startFullSnapshot(repoName, "other-snapshot");
awaitNumberOfSnapshotsInProgress(2); awaitNumberOfSnapshotsInProgress(2);
assertFalse(blockedSnapshot.isDone()); assertFalse(blockedSnapshot.isDone());
unblockNode(repoName, masterName); unblockNode(repoName, clusterManagerName);
awaitNumberOfSnapshotsInProgress(1); awaitNumberOfSnapshotsInProgress(1);
awaitMasterFinishRepoOperations(); awaitMasterFinishRepoOperations();

View File

@ -926,7 +926,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
*/ */
public void testRestoreShrinkIndex() throws Exception { public void testRestoreShrinkIndex() throws Exception {
logger.info("--> starting a cluster-manager node and a data node"); logger.info("--> starting a cluster-manager node and a data node");
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repo = "test-repo"; final String repo = "test-repo";
@ -1145,7 +1145,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception {
logger.info("--> starting a cluster-manager node and two data nodes"); logger.info("--> starting a cluster-manager node and two data nodes");
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(2); internalCluster().startDataOnlyNodes(2);
final Path repoPath = randomRepoPath(); final Path repoPath = randomRepoPath();
createRepository("test-repo", "mock", repoPath); createRepository("test-repo", "mock", repoPath);
@ -1201,7 +1201,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception {
logger.info("--> starting a cluster-manager node and two data nodes"); logger.info("--> starting a cluster-manager node and two data nodes");
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2); final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final Path repoPath = randomRepoPath(); final Path repoPath = randomRepoPath();
createRepository("test-repo", "mock", repoPath); createRepository("test-repo", "mock", repoPath);
@ -1323,7 +1323,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
} }
public void testAbortWaitsOnDataNode() throws Exception { public void testAbortWaitsOnDataNode() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNodeName = internalCluster().startDataOnlyNode(); final String dataNodeName = internalCluster().startDataOnlyNode();
final String indexName = "test-index"; final String indexName = "test-index";
createIndex(indexName); createIndex(indexName);
@ -1375,7 +1375,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
} }
public void testPartialSnapshotAllShardsMissing() throws Exception { public void testPartialSnapshotAllShardsMissing() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final String dataNode = internalCluster().startDataOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "fs"); createRepository(repoName, "fs");
@ -1393,7 +1393,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
* correctly by testing a snapshot name collision. * correctly by testing a snapshot name collision.
*/ */
public void testCreateSnapshotLegacyPath() throws Exception { public void testCreateSnapshotLegacyPath() throws Exception {
final String masterNode = internalCluster().startMasterOnlyNode(); final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "fs"); createRepository(repoName, "fs");
@ -1403,7 +1403,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
final Snapshot snapshot1 = PlainActionFuture.get( final Snapshot snapshot1 = PlainActionFuture.get(
f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f)
); );
awaitNoMoreRunningOperations(masterNode); awaitNoMoreRunningOperations(clusterManagerNode);
final InvalidSnapshotNameException sne = expectThrows( final InvalidSnapshotNameException sne = expectThrows(
InvalidSnapshotNameException.class, InvalidSnapshotNameException.class,
@ -1425,7 +1425,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
} }
public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2); final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final String repoName = "test-repo"; final String repoName = "test-repo";
createRepository(repoName, "fs"); createRepository(repoName, "fs");

View File

@ -107,13 +107,13 @@ public class MultiClusterRepoAccessIT extends AbstractSnapshotIntegTestCase {
} }
public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { public void testConcurrentDeleteFromOtherCluster() throws InterruptedException {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
final String repoNameOnFirstCluster = "test-repo"; final String repoNameOnFirstCluster = "test-repo";
final String repoNameOnSecondCluster = randomBoolean() ? "test-repo" : "other-repo"; final String repoNameOnSecondCluster = randomBoolean() ? "test-repo" : "other-repo";
createRepository(repoNameOnFirstCluster, "fs", repoPath); createRepository(repoNameOnFirstCluster, "fs", repoPath);
secondCluster.startMasterOnlyNode(); secondCluster.startClusterManagerOnlyNode();
secondCluster.startDataOnlyNode(); secondCluster.startDataOnlyNode();
secondCluster.client() secondCluster.client()
.admin() .admin()

View File

@ -60,7 +60,7 @@ public class SnapshotShardsServiceIT extends AbstractSnapshotIntegTestCase {
} }
public void testRetryPostingSnapshotStatusMessages() throws Exception { public void testRetryPostingSnapshotStatusMessages() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode(); internalCluster().startDataOnlyNode();
createRepository("test-repo", "mock"); createRepository("test-repo", "mock");

View File

@ -83,7 +83,7 @@ public class DelayedAllocationServiceTests extends OpenSearchAllocationTestCase
threadPool = new TestThreadPool(getTestName()); threadPool = new TestThreadPool(getTestName());
clusterService = mock(ClusterService.class); clusterService = mock(ClusterService.class);
allocationService = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator()); allocationService = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
when(clusterService.getSettings()).thenReturn(NodeRoles.masterOnlyNode()); when(clusterService.getSettings()).thenReturn(NodeRoles.clusterManagerOnlyNode());
delayedAllocationService = new TestDelayAllocationService(threadPool, clusterService, allocationService); delayedAllocationService = new TestDelayAllocationService(threadPool, clusterService, allocationService);
verify(clusterService).addListener(delayedAllocationService); verify(clusterService).addListener(delayedAllocationService);
verify(clusterService).getSettings(); verify(clusterService).getSettings();

View File

@ -737,7 +737,7 @@ public class AllocationCommandsTests extends OpenSearchAllocationTestCase {
"test1", "test1",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
); );
DiscoveryNode node2 = new DiscoveryNode( DiscoveryNode node2 = new DiscoveryNode(
@ -808,7 +808,7 @@ public class AllocationCommandsTests extends OpenSearchAllocationTestCase {
"test1", "test1",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
); );
DiscoveryNode node2 = new DiscoveryNode( DiscoveryNode node2 = new DiscoveryNode(

View File

@ -338,21 +338,21 @@ public class NodeVersionAllocationDeciderTests extends OpenSearchAllocationTestC
"newNode", "newNode",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
); );
final DiscoveryNode oldNode1 = new DiscoveryNode( final DiscoveryNode oldNode1 = new DiscoveryNode(
"oldNode1", "oldNode1",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion() VersionUtils.getPreviousVersion()
); );
final DiscoveryNode oldNode2 = new DiscoveryNode( final DiscoveryNode oldNode2 = new DiscoveryNode(
"oldNode2", "oldNode2",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion() VersionUtils.getPreviousVersion()
); );
AllocationId allocationId1P = AllocationId.newInitializing(); AllocationId allocationId1P = AllocationId.newInitializing();
@ -457,21 +457,21 @@ public class NodeVersionAllocationDeciderTests extends OpenSearchAllocationTestC
"newNode", "newNode",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
); );
final DiscoveryNode oldNode1 = new DiscoveryNode( final DiscoveryNode oldNode1 = new DiscoveryNode(
"oldNode1", "oldNode1",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion() VersionUtils.getPreviousVersion()
); );
final DiscoveryNode oldNode2 = new DiscoveryNode( final DiscoveryNode oldNode2 = new DiscoveryNode(
"oldNode2", "oldNode2",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
VersionUtils.getPreviousVersion() VersionUtils.getPreviousVersion()
); );

View File

@ -96,7 +96,7 @@ public class SameShardRoutingTests extends OpenSearchAllocationTestCase {
"test1", "test1",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
) )
) )
@ -109,7 +109,7 @@ public class SameShardRoutingTests extends OpenSearchAllocationTestCase {
"test1", "test1",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
) )
) )
@ -138,7 +138,7 @@ public class SameShardRoutingTests extends OpenSearchAllocationTestCase {
"test2", "test2",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
) )
) )

View File

@ -921,14 +921,14 @@ public class DiskThresholdDeciderTests extends OpenSearchAllocationTestCase {
"node1", "node1",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
); );
DiscoveryNode discoveryNode2 = new DiscoveryNode( DiscoveryNode discoveryNode2 = new DiscoveryNode(
"node2", "node2",
buildNewFakeTransportAddress(), buildNewFakeTransportAddress(),
emptyMap(), emptyMap(),
MASTER_DATA_ROLES, CLUSTER_MANAGER_DATA_ROLES,
Version.CURRENT Version.CURRENT
); );
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();

View File

@ -181,8 +181,8 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTest
private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) {
Set<DiscoveryNodeRole> dataOnlyRoles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE); Set<DiscoveryNodeRole> dataOnlyRoles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE);
return DiscoveryNodes.builder() return DiscoveryNodes.builder()
.add(newNode("node1", masterEligible ? MASTER_DATA_ROLES : dataOnlyRoles)) .add(newNode("node1", masterEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles))
.add(newNode("master_node", MASTER_DATA_ROLES)) .add(newNode("master_node", CLUSTER_MANAGER_DATA_ROLES))
.localNodeId("node1") .localNodeId("node1")
.masterNodeId(masterEligible ? "node1" : "master_node"); .masterNodeId(masterEligible ? "node1" : "master_node");
} }

View File

@ -62,7 +62,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction; import java.util.function.BiFunction;
import static org.opensearch.test.NodeRoles.masterOnlyNode; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
import static org.opensearch.test.NodeRoles.nonMasterNode; import static org.opensearch.test.NodeRoles.nonMasterNode;
import static org.opensearch.test.NodeRoles.removeRoles; import static org.opensearch.test.NodeRoles.removeRoles;
import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.either;
@ -553,7 +553,7 @@ public class RemoteClusterServiceTests extends OpenSearchTestCase {
final Settings settings = Settings.EMPTY; final Settings settings = Settings.EMPTY;
final List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); final List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
final Settings data = nonMasterNode(); final Settings data = nonMasterNode();
final Settings dedicatedMaster = masterOnlyNode(); final Settings dedicatedMaster = clusterManagerOnlyNode();
try ( try (
MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster); MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data); MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data);

View File

@ -97,7 +97,7 @@ public class ClusterStateCreationUtils {
numberOfNodes++; numberOfNodes++;
} }
} }
numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local cluster-manager to test shard failures
final ShardId shardId = new ShardId(index, "_na_", 0); final ShardId shardId = new ShardId(index, "_na_", 0);
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
Set<String> unassignedNodes = new HashSet<>(); Set<String> unassignedNodes = new HashSet<>();
@ -107,7 +107,7 @@ public class ClusterStateCreationUtils {
unassignedNodes.add(node.getId()); unassignedNodes.add(node.getId());
} }
discoBuilder.localNodeId(newNode(0).getId()); discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures
final int primaryTerm = 1 + randomInt(200); final int primaryTerm = 1 + randomInt(200);
IndexMetadata indexMetadata = IndexMetadata.builder(index) IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings( .settings(
@ -284,14 +284,14 @@ public class ClusterStateCreationUtils {
*/ */
public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index, int numberOfShards) { public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index, int numberOfShards) {
int numberOfNodes = 2; // we need a non-local master to test shard failures int numberOfNodes = 2; // we need a non-local cluster-manager to test shard failures
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
for (int i = 0; i < numberOfNodes + 1; i++) { for (int i = 0; i < numberOfNodes + 1; i++) {
final DiscoveryNode node = newNode(i); final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.add(node); discoBuilder = discoBuilder.add(node);
} }
discoBuilder.localNodeId(newNode(0).getId()); discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures
IndexMetadata indexMetadata = IndexMetadata.builder(index) IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings( .settings(
Settings.builder() Settings.builder()
@ -426,20 +426,20 @@ public class ClusterStateCreationUtils {
} }
/** /**
* Creates a cluster state where local node and master node can be specified * Creates a cluster state where local node and cluster-manager node can be specified
* *
* @param localNode node in allNodes that is the local node * @param localNode node in allNodes that is the local node
* @param masterNode node in allNodes that is the master node. Can be null if no cluster-manager exists * @param clusterManagerNode node in allNodes that is the cluster-manager node. Can be null if no cluster-manager exists
* @param allNodes all nodes in the cluster * @param allNodes all nodes in the cluster
* @return cluster state * @return cluster state
*/ */
public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) { public static ClusterState state(DiscoveryNode localNode, DiscoveryNode clusterManagerNode, DiscoveryNode... allNodes) {
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
for (DiscoveryNode node : allNodes) { for (DiscoveryNode node : allNodes) {
discoBuilder.add(node); discoBuilder.add(node);
} }
if (masterNode != null) { if (clusterManagerNode != null) {
discoBuilder.masterNodeId(masterNode.getId()); discoBuilder.masterNodeId(clusterManagerNode.getId());
} }
discoBuilder.localNodeId(localNode.getId()); discoBuilder.localNodeId(localNode.getId());

View File

@ -148,7 +148,7 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
return new AllocationDeciders(deciders); return new AllocationDeciders(deciders);
} }
protected static Set<DiscoveryNodeRole> MASTER_DATA_ROLES = Collections.unmodifiableSet( protected static Set<DiscoveryNodeRole> CLUSTER_MANAGER_DATA_ROLES = Collections.unmodifiableSet(
new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE)) new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE))
); );
@ -157,11 +157,11 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
} }
protected static DiscoveryNode newNode(String nodeName, String nodeId, Map<String, String> attributes) { protected static DiscoveryNode newNode(String nodeName, String nodeId, Map<String, String> attributes) {
return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, MASTER_DATA_ROLES, Version.CURRENT); return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT);
} }
protected static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) { protected static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, MASTER_DATA_ROLES, Version.CURRENT); return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT);
} }
protected static DiscoveryNode newNode(String nodeId, Set<DiscoveryNodeRole> roles) { protected static DiscoveryNode newNode(String nodeId, Set<DiscoveryNodeRole> roles) {
@ -169,7 +169,7 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
} }
protected static DiscoveryNode newNode(String nodeId, Version version) { protected static DiscoveryNode newNode(String nodeId, Version version) {
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, version); return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), CLUSTER_MANAGER_DATA_ROLES, version);
} }
protected static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) { protected static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {

View File

@ -217,13 +217,13 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
} }
// Updating the cluster state involves up to 7 delays: // Updating the cluster state involves up to 7 delays:
// 1. submit the task to the master service // 1. submit the task to the cluster-manager service
// 2. send PublishRequest // 2. send PublishRequest
// 3. receive PublishResponse // 3. receive PublishResponse
// 4. send ApplyCommitRequest // 4. send ApplyCommitRequest
// 5. apply committed cluster state // 5. apply committed cluster state
// 6. receive ApplyCommitResponse // 6. receive ApplyCommitResponse
// 7. apply committed state on master (last one to apply cluster state) // 7. apply committed state on cluster-manager (last one to apply cluster state)
public static final long DEFAULT_CLUSTER_STATE_UPDATE_DELAY = 7 * DEFAULT_DELAY_VARIABILITY; public static final long DEFAULT_CLUSTER_STATE_UPDATE_DELAY = 7 * DEFAULT_DELAY_VARIABILITY;
private static final int ELECTION_RETRIES = 10; private static final int ELECTION_RETRIES = 10;
@ -288,11 +288,11 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
this(initialNodeCount, true, Settings.EMPTY); this(initialNodeCount, true, Settings.EMPTY);
} }
Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings) { Cluster(int initialNodeCount, boolean allNodesclusterManagerEligible, Settings nodeSettings) {
this(initialNodeCount, allNodesMasterEligible, nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info")); this(initialNodeCount, allNodesclusterManagerEligible, nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info"));
} }
Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings, NodeHealthService nodeHealthService) { Cluster(int initialNodeCount, boolean allNodesClusterManagerEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
this.nodeHealthService = nodeHealthService; this.nodeHealthService = nodeHealthService;
bigArrays = usually() bigArrays = usually()
? BigArrays.NON_RECYCLING_INSTANCE ? BigArrays.NON_RECYCLING_INSTANCE
@ -301,29 +301,29 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
assertThat(initialNodeCount, greaterThan(0)); assertThat(initialNodeCount, greaterThan(0));
final Set<String> masterEligibleNodeIds = new HashSet<>(initialNodeCount); final Set<String> clusterManagerEligibleNodeIds = new HashSet<>(initialNodeCount);
clusterNodes = new ArrayList<>(initialNodeCount); clusterNodes = new ArrayList<>(initialNodeCount);
for (int i = 0; i < initialNodeCount; i++) { for (int i = 0; i < initialNodeCount; i++) {
final ClusterNode clusterNode = new ClusterNode( final ClusterNode clusterNode = new ClusterNode(
nextNodeIndex.getAndIncrement(), nextNodeIndex.getAndIncrement(),
allNodesMasterEligible || i == 0 || randomBoolean(), allNodesClusterManagerEligible || i == 0 || randomBoolean(),
nodeSettings, nodeSettings,
nodeHealthService nodeHealthService
); );
clusterNodes.add(clusterNode); clusterNodes.add(clusterNode);
if (clusterNode.getLocalNode().isMasterNode()) { if (clusterNode.getLocalNode().isMasterNode()) {
masterEligibleNodeIds.add(clusterNode.getId()); clusterManagerEligibleNodeIds.add(clusterNode.getId());
} }
} }
initialConfiguration = new VotingConfiguration( initialConfiguration = new VotingConfiguration(
new HashSet<>(randomSubsetOf(randomIntBetween(1, masterEligibleNodeIds.size()), masterEligibleNodeIds)) new HashSet<>(randomSubsetOf(randomIntBetween(1, clusterManagerEligibleNodeIds.size()), clusterManagerEligibleNodeIds))
); );
logger.info( logger.info(
"--> creating cluster of {} nodes (cluster-manager-eligible nodes: {}) with initial configuration {}", "--> creating cluster of {} nodes (cluster-manager-eligible nodes: {}) with initial configuration {}",
initialNodeCount, initialNodeCount,
masterEligibleNodeIds, clusterManagerEligibleNodeIds,
initialConfiguration initialConfiguration
); );
} }
@ -336,7 +336,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
addNodes(newNodesCount); addNodes(newNodesCount);
stabilise( stabilise(
// The first pinging discovers the master // The first pinging discovers the cluster-manager
defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING)
// One message delay to send a join // One message delay to send a join
+ DEFAULT_DELAY_VARIABILITY + DEFAULT_DELAY_VARIABILITY
@ -557,7 +557,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final ClusterNode leader = getAnyLeader(); final ClusterNode leader = getAnyLeader();
final long leaderTerm = leader.coordinator.getCurrentTerm(); final long leaderTerm = leader.coordinator.getCurrentTerm();
final int pendingTaskCount = leader.masterService.getFakeMasterServicePendingTaskCount(); final int pendingTaskCount = leader.clusterManagerService.getFakeMasterServicePendingTaskCount();
runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue"); runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue");
final Matcher<Long> isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion()); final Matcher<Long> isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion());
@ -566,7 +566,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
assertTrue(leaderId + " has been bootstrapped", leader.coordinator.isInitialConfigurationSet()); assertTrue(leaderId + " has been bootstrapped", leader.coordinator.isInitialConfigurationSet());
assertTrue(leaderId + " exists in its last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId)); assertTrue(leaderId + " exists in its last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId));
assertThat( assertThat(
leaderId + " has no NO_MASTER_BLOCK", leaderId + " has no NO_CLUSTER_MANAGER_BLOCK",
leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(false) equalTo(false)
); );
@ -616,12 +616,12 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
); );
assertTrue(nodeId + " has been bootstrapped", clusterNode.coordinator.isInitialConfigurationSet()); assertTrue(nodeId + " has been bootstrapped", clusterNode.coordinator.isInitialConfigurationSet());
assertThat( assertThat(
nodeId + " has correct master", nodeId + " has correct cluster-manager",
clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), clusterNode.getLastAppliedClusterState().nodes().getMasterNode(),
equalTo(leader.getLocalNode()) equalTo(leader.getLocalNode())
); );
assertThat( assertThat(
nodeId + " has no NO_MASTER_BLOCK", nodeId + " has no NO_CLUSTER_MANAGER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(false) equalTo(false)
); );
@ -638,7 +638,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
nullValue() nullValue()
); );
assertThat( assertThat(
nodeId + " has NO_MASTER_BLOCK", nodeId + " has NO_CLUSTER_MANAGER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(true) equalTo(true)
); );
@ -898,7 +898,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final long persistedCurrentTerm; final long persistedCurrentTerm;
if ( // node is master-ineligible either before or after the restart ... if ( // node is cluster-manager-ineligible either before or after the restart ...
(oldState.getLastAcceptedState().nodes().getLocalNode().isMasterNode() && newLocalNode.isMasterNode()) == false (oldState.getLastAcceptedState().nodes().getLocalNode().isMasterNode() && newLocalNode.isMasterNode()) == false
// ... and it's accepted some non-initial state so we can roll back ... // ... and it's accepted some non-initial state so we can roll back ...
&& (oldState.getLastAcceptedState().term() > 0L || oldState.getLastAcceptedState().version() > 0L) && (oldState.getLastAcceptedState().term() > 0L || oldState.getLastAcceptedState().version() > 0L)
@ -930,7 +930,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final long newValue = randomLong(); final long newValue = randomLong();
logger.trace( logger.trace(
"rolling back persisted cluster state on master-ineligible node [{}]: " "rolling back persisted cluster state on cluster-manager-ineligible node [{}]: "
+ "previously currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={} " + "previously currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={} "
+ "but now currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={}", + "but now currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={}",
newLocalNode, newLocalNode,
@ -1025,7 +1025,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
private final DiscoveryNode localNode; private final DiscoveryNode localNode;
final MockPersistedState persistedState; final MockPersistedState persistedState;
final Settings nodeSettings; final Settings nodeSettings;
private AckedFakeThreadPoolMasterService masterService; private AckedFakeThreadPoolClusterManagerService clusterManagerService;
private DisruptableClusterApplierService clusterApplierService; private DisruptableClusterApplierService clusterApplierService;
private ClusterService clusterService; private ClusterService clusterService;
TransportService transportService; TransportService transportService;
@ -1033,10 +1033,10 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
private NodeHealthService nodeHealthService; private NodeHealthService nodeHealthService;
List<BiConsumer<DiscoveryNode, ClusterState>> extraJoinValidators = new ArrayList<>(); List<BiConsumer<DiscoveryNode, ClusterState>> extraJoinValidators = new ArrayList<>();
ClusterNode(int nodeIndex, boolean masterEligible, Settings nodeSettings, NodeHealthService nodeHealthService) { ClusterNode(int nodeIndex, boolean clusterManagerEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
this( this(
nodeIndex, nodeIndex,
createDiscoveryNode(nodeIndex, masterEligible), createDiscoveryNode(nodeIndex, clusterManagerEligible),
defaultPersistedStateSupplier, defaultPersistedStateSupplier,
nodeSettings, nodeSettings,
nodeHealthService nodeHealthService
@ -1105,7 +1105,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
null, null,
emptySet() emptySet()
); );
masterService = new AckedFakeThreadPoolMasterService( clusterManagerService = new AckedFakeThreadPoolClusterManagerService(
localNode.getId(), localNode.getId(),
"test", "test",
threadPool, threadPool,
@ -1119,7 +1119,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
deterministicTaskQueue, deterministicTaskQueue,
threadPool threadPool
); );
clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService);
clusterService.setNodeConnectionsService( clusterService.setNodeConnectionsService(
new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService)
); );
@ -1134,7 +1134,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
transportService, transportService,
writableRegistry(), writableRegistry(),
allocationService, allocationService,
masterService, clusterManagerService,
this::getPersistedState, this::getPersistedState,
Cluster.this::provideSeedHosts, Cluster.this::provideSeedHosts,
clusterApplierService, clusterApplierService,
@ -1144,7 +1144,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
getElectionStrategy(), getElectionStrategy(),
nodeHealthService nodeHealthService
); );
masterService.setClusterStatePublisher(coordinator); clusterManagerService.setClusterStatePublisher(coordinator);
final GatewayService gatewayService = new GatewayService( final GatewayService gatewayService = new GatewayService(
settings, settings,
allocationService, allocationService,
@ -1261,7 +1261,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
void submitSetAutoShrinkVotingConfiguration(final boolean autoShrinkVotingConfiguration) { void submitSetAutoShrinkVotingConfiguration(final boolean autoShrinkVotingConfiguration) {
submitUpdateTask( submitUpdateTask(
"set master nodes failure tolerance [" + autoShrinkVotingConfiguration + "]", "set cluster-manager nodes failure tolerance [" + autoShrinkVotingConfiguration + "]",
cs -> ClusterState.builder(cs) cs -> ClusterState.builder(cs)
.metadata( .metadata(
Metadata.builder(cs.metadata()) Metadata.builder(cs.metadata())
@ -1331,11 +1331,11 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
onNode(() -> { onNode(() -> {
logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source); logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source);
final long submittedTerm = coordinator.getCurrentTerm(); final long submittedTerm = coordinator.getCurrentTerm();
masterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { clusterManagerService.submitStateUpdateTask(source, new ClusterStateUpdateTask() {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
assertThat(currentState.term(), greaterThanOrEqualTo(submittedTerm)); assertThat(currentState.term(), greaterThanOrEqualTo(submittedTerm));
masterService.nextAckCollector = ackCollector; clusterManagerService.nextAckCollector = ackCollector;
return clusterStateUpdate.apply(currentState); return clusterStateUpdate.apply(currentState);
} }
@ -1347,7 +1347,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
@Override @Override
public void onNoLongerMaster(String source) { public void onNoLongerMaster(String source) {
logger.trace("no longer master: [{}]", source); logger.trace("no longer cluster-manager: [{}]", source);
taskListener.onNoLongerMaster(source); taskListener.onNoLongerMaster(source);
} }
@ -1510,11 +1510,11 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
} }
} }
static class AckedFakeThreadPoolMasterService extends FakeThreadPoolMasterService { static class AckedFakeThreadPoolClusterManagerService extends FakeThreadPoolMasterService {
AckCollector nextAckCollector = new AckCollector(); AckCollector nextAckCollector = new AckCollector();
AckedFakeThreadPoolMasterService( AckedFakeThreadPoolClusterManagerService(
String nodeName, String nodeName,
String serviceName, String serviceName,
ThreadPool threadPool, ThreadPool threadPool,
@ -1610,7 +1610,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
} }
} }
protected DiscoveryNode createDiscoveryNode(int nodeIndex, boolean masterEligible) { protected DiscoveryNode createDiscoveryNode(int nodeIndex, boolean clusterManagerEligible) {
final TransportAddress address = buildNewFakeTransportAddress(); final TransportAddress address = buildNewFakeTransportAddress();
return new DiscoveryNode( return new DiscoveryNode(
"", "",
@ -1620,7 +1620,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
address.getAddress(), address.getAddress(),
address, address,
Collections.emptyMap(), Collections.emptyMap(),
masterEligible ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(), clusterManagerEligible ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(),
Version.CURRENT Version.CURRENT
); );
} }

View File

@ -285,11 +285,11 @@ public class CoordinationStateTestCluster {
} else if (rarely() && rarely()) { } else if (rarely() && rarely()) {
randomFrom(clusterNodes).reboot(); randomFrom(clusterNodes).reboot();
} else if (rarely()) { } else if (rarely()) {
final List<ClusterNode> masterNodes = clusterNodes.stream() final List<ClusterNode> clusterManangerNodes = clusterNodes.stream()
.filter(cn -> cn.state.electionWon()) .filter(cn -> cn.state.electionWon())
.collect(Collectors.toList()); .collect(Collectors.toList());
if (masterNodes.isEmpty() == false) { if (clusterManangerNodes.isEmpty() == false) {
final ClusterNode clusterNode = randomFrom(masterNodes); final ClusterNode clusterNode = randomFrom(clusterManangerNodes);
final long term = rarely() ? randomLongBetween(0, maxTerm + 1) : clusterNode.state.getCurrentTerm(); final long term = rarely() ? randomLongBetween(0, maxTerm + 1) : clusterNode.state.getCurrentTerm();
final long version = rarely() ? randomIntBetween(0, 5) : clusterNode.state.getLastPublishedVersion() + 1; final long version = rarely() ? randomIntBetween(0, 5) : clusterNode.state.getLastPublishedVersion() + 1;
final CoordinationMetadata.VotingConfiguration acceptedConfig = rarely() final CoordinationMetadata.VotingConfiguration acceptedConfig = rarely()
@ -323,13 +323,15 @@ public class CoordinationStateTestCluster {
} }
void invariant() { void invariant() {
// one master per term // one cluster-manager per term
messages.stream() messages.stream()
.filter(m -> m.payload instanceof PublishRequest) .filter(m -> m.payload instanceof PublishRequest)
.collect(Collectors.groupingBy(m -> ((PublishRequest) m.payload).getAcceptedState().term())) .collect(Collectors.groupingBy(m -> ((PublishRequest) m.payload).getAcceptedState().term()))
.forEach((term, publishMessages) -> { .forEach((term, publishMessages) -> {
Set<DiscoveryNode> mastersForTerm = publishMessages.stream().collect(Collectors.groupingBy(m -> m.sourceNode)).keySet(); Set<DiscoveryNode> clusterManagersForTerm = publishMessages.stream()
assertThat("Multiple masters " + mastersForTerm + " for term " + term, mastersForTerm, hasSize(1)); .collect(Collectors.groupingBy(m -> m.sourceNode))
.keySet();
assertThat("Multiple cluster-managers " + clusterManagersForTerm + " for term " + term, clusterManagersForTerm, hasSize(1));
}); });
// unique cluster state per (term, version) pair // unique cluster state per (term, version) pair

View File

@ -116,7 +116,7 @@ public class FakeThreadPoolMasterService extends MasterService {
onTaskAvailableToRun.accept(new Runnable() { onTaskAvailableToRun.accept(new Runnable() {
@Override @Override
public String toString() { public String toString() {
return "master service scheduling next task"; return "cluster-manager service scheduling next task";
} }
@Override @Override
@ -125,7 +125,7 @@ public class FakeThreadPoolMasterService extends MasterService {
assert waitForPublish == false; assert waitForPublish == false;
assert scheduledNextTask; assert scheduledNextTask;
final int taskIndex = randomInt(pendingTasks.size() - 1); final int taskIndex = randomInt(pendingTasks.size() - 1);
logger.debug("next master service task: choosing task {} of {}", taskIndex, pendingTasks.size()); logger.debug("next cluster-manager service task: choosing task {} of {}", taskIndex, pendingTasks.size());
final Runnable task = pendingTasks.remove(taskIndex); final Runnable task = pendingTasks.remove(taskIndex);
taskInProgress = true; taskInProgress = true;
scheduledNextTask = false; scheduledNextTask = false;

View File

@ -66,21 +66,22 @@ public class MockBigArrays extends BigArrays {
private static final ConcurrentMap<Object, Object> ACQUIRED_ARRAYS = new ConcurrentHashMap<>(); private static final ConcurrentMap<Object, Object> ACQUIRED_ARRAYS = new ConcurrentHashMap<>();
public static void ensureAllArraysAreReleased() throws Exception { public static void ensureAllArraysAreReleased() throws Exception {
final Map<Object, Object> masterCopy = new HashMap<>(ACQUIRED_ARRAYS); final Map<Object, Object> clusterManagerCopy = new HashMap<>(ACQUIRED_ARRAYS);
if (!masterCopy.isEmpty()) { if (!clusterManagerCopy.isEmpty()) {
// not empty, we might be executing on a shared cluster that keeps on obtaining // not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing arrays, lets make sure that after a reasonable timeout, all master // and releasing arrays, lets make sure that after a reasonable timeout, all cluster-manager
// copy (snapshot) have been released // copy (snapshot) have been released
try { try {
assertBusy(() -> assertTrue(Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_ARRAYS.keySet()))); assertBusy(() -> assertTrue(Sets.haveEmptyIntersection(clusterManagerCopy.keySet(), ACQUIRED_ARRAYS.keySet())));
} catch (AssertionError ex) { } catch (AssertionError ex) {
masterCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet()); clusterManagerCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet());
ACQUIRED_ARRAYS.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on // remove all existing cluster-manager copy we will report on
if (!masterCopy.isEmpty()) { ACQUIRED_ARRAYS.keySet().removeAll(clusterManagerCopy.keySet());
Iterator<Object> causes = masterCopy.values().iterator(); if (!clusterManagerCopy.isEmpty()) {
Iterator<Object> causes = clusterManagerCopy.values().iterator();
Object firstCause = causes.next(); Object firstCause = causes.next();
RuntimeException exception = new RuntimeException( RuntimeException exception = new RuntimeException(
masterCopy.size() + " arrays have not been released", clusterManagerCopy.size() + " arrays have not been released",
firstCause instanceof Throwable ? (Throwable) firstCause : null firstCause instanceof Throwable ? (Throwable) firstCause : null
); );
while (causes.hasNext()) { while (causes.hasNext()) {

View File

@ -53,19 +53,23 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = new ConcurrentHashMap<>(); private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = new ConcurrentHashMap<>();
public static void ensureAllPagesAreReleased() throws Exception { public static void ensureAllPagesAreReleased() throws Exception {
final Map<Object, Throwable> masterCopy = new HashMap<>(ACQUIRED_PAGES); final Map<Object, Throwable> clusterManagerCopy = new HashMap<>(ACQUIRED_PAGES);
if (!masterCopy.isEmpty()) { if (!clusterManagerCopy.isEmpty()) {
// not empty, we might be executing on a shared cluster that keeps on obtaining // not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing pages, lets make sure that after a reasonable timeout, all master // and releasing pages, lets make sure that after a reasonable timeout, all cluster-manager
// copy (snapshot) have been released // copy (snapshot) have been released
final boolean success = waitUntil(() -> Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet())); final boolean success = waitUntil(() -> Sets.haveEmptyIntersection(clusterManagerCopy.keySet(), ACQUIRED_PAGES.keySet()));
if (!success) { if (!success) {
masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet()); clusterManagerCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on // remove all existing cluster-manager copy we will report on
if (!masterCopy.isEmpty()) { ACQUIRED_PAGES.keySet().removeAll(clusterManagerCopy.keySet());
Iterator<Throwable> causes = masterCopy.values().iterator(); if (!clusterManagerCopy.isEmpty()) {
Iterator<Throwable> causes = clusterManagerCopy.values().iterator();
Throwable firstCause = causes.next(); Throwable firstCause = causes.next();
RuntimeException exception = new RuntimeException(masterCopy.size() + " pages have not been released", firstCause); RuntimeException exception = new RuntimeException(
clusterManagerCopy.size() + " pages have not been released",
firstCause
);
while (causes.hasNext()) { while (causes.hasNext()) {
exception.addSuppressed(causes.next()); exception.addSuppressed(causes.next());
} }

View File

@ -410,7 +410,7 @@ public final class BlobStoreTestUtil {
final ClusterService clusterService = mock(ClusterService.class); final ClusterService clusterService = mock(ClusterService.class);
final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class); final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class);
when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService); when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService);
// Setting local node as master so it may update the repository metadata in the cluster state // Setting local node as cluster-manager so it may update the repository metadata in the cluster state
final DiscoveryNode localNode = new DiscoveryNode("", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode localNode = new DiscoveryNode("", buildNewFakeTransportAddress(), Version.CURRENT);
final AtomicReference<ClusterState> currentState = new AtomicReference<>( final AtomicReference<ClusterState> currentState = new AtomicReference<>(
ClusterState.builder(initialState) ClusterState.builder(initialState)

View File

@ -252,30 +252,30 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
} }
public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) { public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName(); final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName)) ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName))
.setBlockAndFailOnWriteIndexFile(); .setBlockAndFailOnWriteIndexFile();
return masterName; return clusterManagerName;
} }
public static String blockMasterOnWriteIndexFile(final String repositoryName) { public static String blockMasterOnWriteIndexFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName(); final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repositoryName)) ((MockRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repositoryName))
.setBlockOnWriteIndexFile(); .setBlockOnWriteIndexFile();
return masterName; return clusterManagerName;
} }
public static void blockMasterFromDeletingIndexNFile(String repositoryName) { public static void blockMasterFromDeletingIndexNFile(String repositoryName) {
final String masterName = internalCluster().getMasterName(); final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName)) ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName))
.setBlockOnDeleteIndexFile(); .setBlockOnDeleteIndexFile();
} }
public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) { public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName(); final String clusterManagerName = internalCluster().getMasterName();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName)) ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName))
.setBlockAndFailOnWriteSnapFiles(true); .setBlockAndFailOnWriteSnapFiles(true);
return masterName; return clusterManagerName;
} }
public static String blockNodeWithIndex(final String repositoryName, final String indexName) { public static String blockNodeWithIndex(final String repositoryName, final String indexName) {
@ -628,10 +628,10 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
} }
protected void awaitMasterFinishRepoOperations() throws Exception { protected void awaitMasterFinishRepoOperations() throws Exception {
logger.info("--> waiting for master to finish all repo operations on its SNAPSHOT pool"); logger.info("--> waiting for cluster-manager to finish all repo operations on its SNAPSHOT pool");
final ThreadPool masterThreadPool = internalCluster().getMasterNodeInstance(ThreadPool.class); final ThreadPool clusterManagerThreadPool = internalCluster().getMasterNodeInstance(ThreadPool.class);
assertBusy(() -> { assertBusy(() -> {
for (ThreadPoolStats.Stats stat : masterThreadPool.stats()) { for (ThreadPoolStats.Stats stat : clusterManagerThreadPool.stats()) {
if (ThreadPool.Names.SNAPSHOT.equals(stat.getName())) { if (ThreadPool.Names.SNAPSHOT.equals(stat.getName())) {
assertEquals(stat.getActive(), 0); assertEquals(stat.getActive(), 0);
break; break;

View File

@ -143,7 +143,7 @@ public class MockRepository extends FsRepository {
*/ */
private volatile boolean blockOnWriteIndexFile; private volatile boolean blockOnWriteIndexFile;
/** Allows blocking on writing the snapshot file at the end of snapshot creation to simulate a died master node */ /** Allows blocking on writing the snapshot file at the end of snapshot creation to simulate a died cluster-manager node */
private volatile boolean blockAndFailOnWriteSnapFile; private volatile boolean blockAndFailOnWriteSnapFile;
private volatile boolean blockOnWriteShardLevelMeta; private volatile boolean blockOnWriteShardLevelMeta;
@ -191,7 +191,7 @@ public class MockRepository extends FsRepository {
} }
private static RepositoryMetadata overrideSettings(RepositoryMetadata metadata, Environment environment) { private static RepositoryMetadata overrideSettings(RepositoryMetadata metadata, Environment environment) {
// TODO: use another method of testing not being able to read the test file written by the master... // TODO: use another method of testing not being able to read the test file written by the cluster-manager...
// this is super duper hacky // this is super duper hacky
if (metadata.settings().getAsBoolean("localize_location", false)) { if (metadata.settings().getAsBoolean("localize_location", false)) {
Path location = PathUtils.get(metadata.settings().get("location")); Path location = PathUtils.get(metadata.settings().get("location"));

View File

@ -248,10 +248,10 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
assert serviceHolderWithNoType == null; assert serviceHolderWithNoType == null;
// we initialize the serviceHolder and serviceHolderWithNoType just once, but need some // we initialize the serviceHolder and serviceHolderWithNoType just once, but need some
// calls to the randomness source during its setup. In order to not mix these calls with // calls to the randomness source during its setup. In order to not mix these calls with
// the randomness source that is later used in the test method, we use the master seed during // the randomness source that is later used in the test method, we use the cluster-manager seed during
// this setup // this setup
long masterSeed = SeedUtils.parseSeed(RandomizedTest.getContext().getRunnerSeedAsString()); long clusterManagerSeed = SeedUtils.parseSeed(RandomizedTest.getContext().getRunnerSeedAsString());
RandomizedTest.getContext().runWithPrivateRandomness(masterSeed, (Callable<Void>) () -> { RandomizedTest.getContext().runWithPrivateRandomness(clusterManagerSeed, (Callable<Void>) () -> {
serviceHolder = new ServiceHolder( serviceHolder = new ServiceHolder(
nodeSettings, nodeSettings,
createTestIndexSettings(), createTestIndexSettings(),

View File

@ -62,19 +62,19 @@ import static junit.framework.TestCase.fail;
public class ClusterServiceUtils { public class ClusterServiceUtils {
public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) {
MasterService masterService = new MasterService( MasterService clusterManagerService = new MasterService(
Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_master_node").build(), Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_cluster_manager_node").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool threadPool
); );
AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState); AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
clusterStateRef.set(event.state()); clusterStateRef.set(event.state());
publishListener.onResponse(null); publishListener.onResponse(null);
}); });
masterService.setClusterStateSupplier(clusterStateRef::get); clusterManagerService.setClusterStateSupplier(clusterStateRef::get);
masterService.start(); clusterManagerService.start();
return masterService; return clusterManagerService;
} }
public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) {

View File

@ -90,7 +90,7 @@ public final class ExternalTestCluster extends TestCluster {
private final String clusterName; private final String clusterName;
private final int numDataNodes; private final int numDataNodes;
private final int numMasterAndDataNodes; private final int numClusterManagerAndDataNodes;
public ExternalTestCluster( public ExternalTestCluster(
Path tempDir, Path tempDir,
@ -141,19 +141,19 @@ public final class ExternalTestCluster extends TestCluster {
.get(); .get();
httpAddresses = new InetSocketAddress[nodeInfos.getNodes().size()]; httpAddresses = new InetSocketAddress[nodeInfos.getNodes().size()];
int dataNodes = 0; int dataNodes = 0;
int masterAndDataNodes = 0; int clusterManagerAndDataNodes = 0;
for (int i = 0; i < nodeInfos.getNodes().size(); i++) { for (int i = 0; i < nodeInfos.getNodes().size(); i++) {
NodeInfo nodeInfo = nodeInfos.getNodes().get(i); NodeInfo nodeInfo = nodeInfos.getNodes().get(i);
httpAddresses[i] = nodeInfo.getInfo(HttpInfo.class).address().publishAddress().address(); httpAddresses[i] = nodeInfo.getInfo(HttpInfo.class).address().publishAddress().address();
if (DiscoveryNode.isDataNode(nodeInfo.getSettings())) { if (DiscoveryNode.isDataNode(nodeInfo.getSettings())) {
dataNodes++; dataNodes++;
masterAndDataNodes++; clusterManagerAndDataNodes++;
} else if (DiscoveryNode.isMasterNode(nodeInfo.getSettings())) { } else if (DiscoveryNode.isMasterNode(nodeInfo.getSettings())) {
masterAndDataNodes++; clusterManagerAndDataNodes++;
} }
} }
this.numDataNodes = dataNodes; this.numDataNodes = dataNodes;
this.numMasterAndDataNodes = masterAndDataNodes; this.numClusterManagerAndDataNodes = clusterManagerAndDataNodes;
this.client = client; this.client = client;
this.node = node; this.node = node;
@ -191,7 +191,7 @@ public final class ExternalTestCluster extends TestCluster {
@Override @Override
public int numDataAndMasterNodes() { public int numDataAndMasterNodes() {
return numMasterAndDataNodes; return numClusterManagerAndDataNodes;
} }
@Override @Override

View File

@ -166,7 +166,6 @@ import static org.opensearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_
import static org.opensearch.test.OpenSearchTestCase.assertBusy; import static org.opensearch.test.OpenSearchTestCase.assertBusy;
import static org.opensearch.test.OpenSearchTestCase.randomFrom; import static org.opensearch.test.OpenSearchTestCase.randomFrom;
import static org.opensearch.test.NodeRoles.dataOnlyNode; import static org.opensearch.test.NodeRoles.dataOnlyNode;
import static org.opensearch.test.NodeRoles.masterOnlyNode;
import static org.opensearch.test.NodeRoles.noRoles; import static org.opensearch.test.NodeRoles.noRoles;
import static org.opensearch.test.NodeRoles.onlyRole; import static org.opensearch.test.NodeRoles.onlyRole;
import static org.opensearch.test.NodeRoles.removeRoles; import static org.opensearch.test.NodeRoles.removeRoles;
@ -200,11 +199,11 @@ public final class InternalTestCluster extends TestCluster {
nodeAndClient.node.settings() nodeAndClient.node.settings()
); );
private static final Predicate<NodeAndClient> NO_DATA_NO_MASTER_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode( private static final Predicate<NodeAndClient> NO_DATA_NO_CLUSTER_MANAGER_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode(
nodeAndClient.node.settings() nodeAndClient.node.settings()
) == false && DiscoveryNode.isDataNode(nodeAndClient.node.settings()) == false; ) == false && DiscoveryNode.isDataNode(nodeAndClient.node.settings()) == false;
private static final Predicate<NodeAndClient> MASTER_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode( private static final Predicate<NodeAndClient> CLUSTER_MANAGER_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode(
nodeAndClient.node.settings() nodeAndClient.node.settings()
); );
@ -238,8 +237,8 @@ public final class InternalTestCluster extends TestCluster {
* fully shared cluster to be more reproducible */ * fully shared cluster to be more reproducible */
private final long[] sharedNodesSeeds; private final long[] sharedNodesSeeds;
// if set to 0, data nodes will also assume the master role // if set to 0, data nodes will also assume the cluster-manager role
private final int numSharedDedicatedMasterNodes; private final int numSharedDedicatedClusterManagerNodes;
private final int numSharedDataNodes; private final int numSharedDataNodes;
@ -249,7 +248,7 @@ public final class InternalTestCluster extends TestCluster {
private final ExecutorService executor; private final ExecutorService executor;
private final boolean autoManageMasterNodes; private final boolean autoManageClusterManagerNodes;
private final Collection<Class<? extends Plugin>> mockPlugins; private final Collection<Class<? extends Plugin>> mockPlugins;
@ -266,13 +265,13 @@ public final class InternalTestCluster extends TestCluster {
private ServiceDisruptionScheme activeDisruptionScheme; private ServiceDisruptionScheme activeDisruptionScheme;
private final Function<Client, Client> clientWrapper; private final Function<Client, Client> clientWrapper;
private int bootstrapMasterNodeIndex = -1; private int bootstrapClusterManagerNodeIndex = -1;
public InternalTestCluster( public InternalTestCluster(
final long clusterSeed, final long clusterSeed,
final Path baseDir, final Path baseDir,
final boolean randomlyAddDedicatedMasters, final boolean randomlyAddDedicatedClusterManagers,
final boolean autoManageMasterNodes, final boolean autoManageClusterManagerNodes,
final int minNumDataNodes, final int minNumDataNodes,
final int maxNumDataNodes, final int maxNumDataNodes,
final String clusterName, final String clusterName,
@ -285,8 +284,8 @@ public final class InternalTestCluster extends TestCluster {
this( this(
clusterSeed, clusterSeed,
baseDir, baseDir,
randomlyAddDedicatedMasters, randomlyAddDedicatedClusterManagers,
autoManageMasterNodes, autoManageClusterManagerNodes,
minNumDataNodes, minNumDataNodes,
maxNumDataNodes, maxNumDataNodes,
clusterName, clusterName,
@ -302,8 +301,8 @@ public final class InternalTestCluster extends TestCluster {
public InternalTestCluster( public InternalTestCluster(
final long clusterSeed, final long clusterSeed,
final Path baseDir, final Path baseDir,
final boolean randomlyAddDedicatedMasters, final boolean randomlyAddDedicatedClusterManagers,
final boolean autoManageMasterNodes, final boolean autoManageClusterManagerNodes,
final int minNumDataNodes, final int minNumDataNodes,
final int maxNumDataNodes, final int maxNumDataNodes,
final String clusterName, final String clusterName,
@ -315,7 +314,7 @@ public final class InternalTestCluster extends TestCluster {
final boolean forbidPrivateIndexSettings final boolean forbidPrivateIndexSettings
) { ) {
super(clusterSeed); super(clusterSeed);
this.autoManageMasterNodes = autoManageMasterNodes; this.autoManageClusterManagerNodes = autoManageClusterManagerNodes;
this.clientWrapper = clientWrapper; this.clientWrapper = clientWrapper;
this.forbidPrivateIndexSettings = forbidPrivateIndexSettings; this.forbidPrivateIndexSettings = forbidPrivateIndexSettings;
this.baseDir = baseDir; this.baseDir = baseDir;
@ -330,24 +329,24 @@ public final class InternalTestCluster extends TestCluster {
Random random = new Random(clusterSeed); Random random = new Random(clusterSeed);
boolean useDedicatedMasterNodes = randomlyAddDedicatedMasters ? random.nextBoolean() : false; boolean useDedicatedClusterManagerNodes = randomlyAddDedicatedClusterManagers ? random.nextBoolean() : false;
this.numSharedDataNodes = RandomNumbers.randomIntBetween(random, minNumDataNodes, maxNumDataNodes); this.numSharedDataNodes = RandomNumbers.randomIntBetween(random, minNumDataNodes, maxNumDataNodes);
assert this.numSharedDataNodes >= 0; assert this.numSharedDataNodes >= 0;
if (numSharedDataNodes == 0) { if (numSharedDataNodes == 0) {
this.numSharedCoordOnlyNodes = 0; this.numSharedCoordOnlyNodes = 0;
this.numSharedDedicatedMasterNodes = 0; this.numSharedDedicatedClusterManagerNodes = 0;
} else { } else {
if (useDedicatedMasterNodes) { if (useDedicatedClusterManagerNodes) {
if (random.nextBoolean()) { if (random.nextBoolean()) {
// use a dedicated master, but only low number to reduce overhead to tests // use a dedicated cluster-manager, but only low number to reduce overhead to tests
this.numSharedDedicatedMasterNodes = DEFAULT_LOW_NUM_MASTER_NODES; this.numSharedDedicatedClusterManagerNodes = DEFAULT_LOW_NUM_MASTER_NODES;
} else { } else {
this.numSharedDedicatedMasterNodes = DEFAULT_HIGH_NUM_MASTER_NODES; this.numSharedDedicatedClusterManagerNodes = DEFAULT_HIGH_NUM_MASTER_NODES;
} }
} else { } else {
this.numSharedDedicatedMasterNodes = 0; this.numSharedDedicatedClusterManagerNodes = 0;
} }
if (numClientNodes < 0) { if (numClientNodes < 0) {
this.numSharedCoordOnlyNodes = RandomNumbers.randomIntBetween( this.numSharedCoordOnlyNodes = RandomNumbers.randomIntBetween(
@ -367,20 +366,20 @@ public final class InternalTestCluster extends TestCluster {
this.mockPlugins = mockPlugins; this.mockPlugins = mockPlugins;
sharedNodesSeeds = new long[numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes]; sharedNodesSeeds = new long[numSharedDedicatedClusterManagerNodes + numSharedDataNodes + numSharedCoordOnlyNodes];
for (int i = 0; i < sharedNodesSeeds.length; i++) { for (int i = 0; i < sharedNodesSeeds.length; i++) {
sharedNodesSeeds[i] = random.nextLong(); sharedNodesSeeds[i] = random.nextLong();
} }
logger.info( logger.info(
"Setup InternalTestCluster [{}] with seed [{}] using [{}] dedicated masters, " "Setup InternalTestCluster [{}] with seed [{}] using [{}] dedicated cluster-managers, "
+ "[{}] (data) nodes and [{}] coord only nodes (min_master_nodes are [{}])", + "[{}] (data) nodes and [{}] coord only nodes (min_cluster_manager_nodes are [{}])",
clusterName, clusterName,
SeedUtils.formatSeed(clusterSeed), SeedUtils.formatSeed(clusterSeed),
numSharedDedicatedMasterNodes, numSharedDedicatedClusterManagerNodes,
numSharedDataNodes, numSharedDataNodes,
numSharedCoordOnlyNodes, numSharedCoordOnlyNodes,
autoManageMasterNodes ? "auto-managed" : "manual" autoManageClusterManagerNodes ? "auto-managed" : "manual"
); );
this.nodeConfigurationSource = nodeConfigurationSource; this.nodeConfigurationSource = nodeConfigurationSource;
numDataPaths = random.nextInt(5) == 0 ? 2 + random.nextInt(3) : 1; numDataPaths = random.nextInt(5) == 0 ? 2 + random.nextInt(3) : 1;
@ -433,8 +432,8 @@ public final class InternalTestCluster extends TestCluster {
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.getKey(), RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.getKey(),
RandomNumbers.randomIntBetween(random, 1, 4) RandomNumbers.randomIntBetween(random, 1, 4)
); );
// TODO: currently we only randomize "cluster.no_master_block" between "write" and "metadata_write", as "all" is fragile // TODO: currently we only randomize "cluster.no_cluster_manager_block" between "write" and "metadata_write", as "all" is fragile
// and fails shards when a master abdicates, which breaks many tests. // and fails shards when a cluster-manager abdicates, which breaks many tests.
builder.put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write")); builder.put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write"));
defaultSettings = builder.build(); defaultSettings = builder.build();
executor = OpenSearchExecutors.newScaling( executor = OpenSearchExecutors.newScaling(
@ -449,14 +448,15 @@ public final class InternalTestCluster extends TestCluster {
} }
/** /**
* Sets {@link #bootstrapMasterNodeIndex} to the given value, see {@link #bootstrapMasterNodeWithSpecifiedIndex(List)} * Sets {@link #bootstrapClusterManagerNodeIndex} to the given value, see {@link #bootstrapClusterManagerNodeWithSpecifiedIndex(List)}
* for the description of how this field is used. * for the description of how this field is used.
* It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMasterNodes is false. * It's only possible to change {@link #bootstrapClusterManagerNodeIndex} value if autoManageClusterManagerNodes is false.
*/ */
public void setBootstrapMasterNodeIndex(int bootstrapMasterNodeIndex) { public void setBootstrapClusterManagerNodeIndex(int bootstrapClusterManagerNodeIndex) {
assert autoManageMasterNodes == false || bootstrapMasterNodeIndex == -1 assert autoManageClusterManagerNodes == false || bootstrapClusterManagerNodeIndex == -1
: "bootstrapMasterNodeIndex should be -1 if autoManageMasterNodes is true, but was " + bootstrapMasterNodeIndex; : "bootstrapClusterManagerNodeIndex should be -1 if autoManageClusterManagerNodes is true, but was "
this.bootstrapMasterNodeIndex = bootstrapMasterNodeIndex; + bootstrapClusterManagerNodeIndex;
this.bootstrapClusterManagerNodeIndex = bootstrapClusterManagerNodeIndex;
} }
@Override @Override
@ -648,7 +648,7 @@ public final class InternalTestCluster extends TestCluster {
int size = numDataNodes(); int size = numDataNodes();
if (size < n) { if (size < n) {
logger.info("increasing cluster size from {} to {}", size, n); logger.info("increasing cluster size from {} to {}", size, n);
if (numSharedDedicatedMasterNodes > 0) { if (numSharedDedicatedClusterManagerNodes > 0) {
startDataOnlyNodes(n - size); startDataOnlyNodes(n - size);
} else { } else {
startNodes(n - size); startNodes(n - size);
@ -667,7 +667,7 @@ public final class InternalTestCluster extends TestCluster {
if (size <= n) { if (size <= n) {
return; return;
} }
// prevent killing the master if possible and client nodes // prevent killing the cluster-manager if possible and client nodes
final Stream<NodeAndClient> collection = n == 0 final Stream<NodeAndClient> collection = n == 0
? nodes.values().stream() ? nodes.values().stream()
: nodes.values().stream().filter(DATA_NODE_PREDICATE.and(new NodeNamePredicate(getMasterName()).negate())); : nodes.values().stream().filter(DATA_NODE_PREDICATE.and(new NodeNamePredicate(getMasterName()).negate()));
@ -687,7 +687,12 @@ public final class InternalTestCluster extends TestCluster {
} }
} }
private Settings getNodeSettings(final int nodeId, final long seed, final Settings extraSettings, final int defaultMinMasterNodes) { private Settings getNodeSettings(
final int nodeId,
final long seed,
final Settings extraSettings,
final int defaultMinClusterManagerNodes
) {
final Settings settings = getSettings(nodeId, seed, extraSettings); final Settings settings = getSettings(nodeId, seed, extraSettings);
final String name = buildNodeName(nodeId, settings); final String name = buildNodeName(nodeId, settings);
@ -718,9 +723,9 @@ public final class InternalTestCluster extends TestCluster {
final String discoveryType = DISCOVERY_TYPE_SETTING.get(updatedSettings.build()); final String discoveryType = DISCOVERY_TYPE_SETTING.get(updatedSettings.build());
final boolean usingSingleNodeDiscovery = discoveryType.equals("single-node"); final boolean usingSingleNodeDiscovery = discoveryType.equals("single-node");
if (usingSingleNodeDiscovery == false) { if (usingSingleNodeDiscovery == false) {
if (autoManageMasterNodes) { if (autoManageClusterManagerNodes) {
assertThat( assertThat(
"if master nodes are automatically managed then nodes must complete a join cycle when starting", "if cluster-manager nodes are automatically managed then nodes must complete a join cycle when starting",
updatedSettings.get(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey()), updatedSettings.get(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey()),
nullValue() nullValue()
); );
@ -817,26 +822,26 @@ public final class InternalTestCluster extends TestCluster {
} }
/** /**
* Returns a node client to the current master node. * Returns a node client to the current cluster-manager node.
* Note: use this with care tests should not rely on a certain nodes client. * Note: use this with care tests should not rely on a certain nodes client.
*/ */
public Client masterClient() { public Client masterClient() {
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName())); NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()));
if (randomNodeAndClient != null) { if (randomNodeAndClient != null) {
return randomNodeAndClient.nodeClient(); // ensure node client master is requested return randomNodeAndClient.nodeClient(); // ensure node client cluster-manager is requested
} }
throw new AssertionError("No cluster-manager client found"); throw new AssertionError("No cluster-manager client found");
} }
/** /**
* Returns a node client to random node but not the master. This method will fail if no non-master client is available. * Returns a node client to random node but not the cluster-manager. This method will fail if no non-cluster-manager client is available.
*/ */
public Client nonMasterClient() { public Client nonMasterClient() {
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate()); NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate());
if (randomNodeAndClient != null) { if (randomNodeAndClient != null) {
return randomNodeAndClient.nodeClient(); // ensure node client non-master is requested return randomNodeAndClient.nodeClient(); // ensure node client non-cluster-manager is requested
} }
throw new AssertionError("No non-master client found"); throw new AssertionError("No non-cluster-manager client found");
} }
/** /**
@ -844,14 +849,14 @@ public final class InternalTestCluster extends TestCluster {
*/ */
public synchronized Client coordOnlyNodeClient() { public synchronized Client coordOnlyNodeClient() {
ensureOpen(); ensureOpen();
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE); NodeAndClient randomNodeAndClient = getRandomNodeAndClient(NO_DATA_NO_CLUSTER_MANAGER_PREDICATE);
if (randomNodeAndClient != null) { if (randomNodeAndClient != null) {
return randomNodeAndClient.client(); return randomNodeAndClient.client();
} }
int nodeId = nextNodeId.getAndIncrement(); int nodeId = nextNodeId.getAndIncrement();
Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY); Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY);
startCoordinatingOnlyNode(settings); startCoordinatingOnlyNode(settings);
return getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE).client(); return getRandomNodeAndClient(NO_DATA_NO_CLUSTER_MANAGER_PREDICATE).client();
} }
public synchronized String startCoordinatingOnlyNode(Settings settings) { public synchronized String startCoordinatingOnlyNode(Settings settings) {
@ -981,7 +986,7 @@ public final class InternalTestCluster extends TestCluster {
/** /**
* closes the node and prepares it to be restarted * closes the node and prepares it to be restarted
*/ */
Settings closeForRestart(RestartCallback callback, int minMasterNodes) throws Exception { Settings closeForRestart(RestartCallback callback, int minClusterManagerNodes) throws Exception {
assert callback != null; assert callback != null;
close(); close();
removeNode(this); removeNode(this);
@ -989,7 +994,7 @@ public final class InternalTestCluster extends TestCluster {
assert callbackSettings != null; assert callbackSettings != null;
Settings.Builder newSettings = Settings.builder(); Settings.Builder newSettings = Settings.builder();
newSettings.put(callbackSettings); newSettings.put(callbackSettings);
if (minMasterNodes >= 0) { if (minClusterManagerNodes >= 0) {
if (INITIAL_CLUSTER_MANAGER_NODES_SETTING.exists(callbackSettings) == false) { if (INITIAL_CLUSTER_MANAGER_NODES_SETTING.exists(callbackSettings) == false) {
newSettings.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()); newSettings.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey());
} }
@ -1128,54 +1133,56 @@ public final class InternalTestCluster extends TestCluster {
final int prevNodeCount = nodes.size(); final int prevNodeCount = nodes.size();
// start any missing node // start any missing node
assert newSize == numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; assert newSize == numSharedDedicatedClusterManagerNodes + numSharedDataNodes + numSharedCoordOnlyNodes;
final int numberOfMasterNodes = numSharedDedicatedMasterNodes > 0 ? numSharedDedicatedMasterNodes : numSharedDataNodes; final int numberOfClusterManagerNodes = numSharedDedicatedClusterManagerNodes > 0
final int defaultMinMasterNodes = (numberOfMasterNodes / 2) + 1; ? numSharedDedicatedClusterManagerNodes
: numSharedDataNodes;
final int defaultMinClusterManagerNodes = (numberOfClusterManagerNodes / 2) + 1;
final List<NodeAndClient> toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go final List<NodeAndClient> toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go
final Runnable onTransportServiceStarted = () -> rebuildUnicastHostFiles(toStartAndPublish); final Runnable onTransportServiceStarted = () -> rebuildUnicastHostFiles(toStartAndPublish);
final List<Settings> settings = new ArrayList<>(); final List<Settings> settings = new ArrayList<>();
for (int i = 0; i < numSharedDedicatedMasterNodes; i++) { for (int i = 0; i < numSharedDedicatedClusterManagerNodes; i++) {
final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinMasterNodes); final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinClusterManagerNodes);
settings.add(removeRoles(nodeSettings, Collections.singleton(DiscoveryNodeRole.DATA_ROLE))); settings.add(removeRoles(nodeSettings, Collections.singleton(DiscoveryNodeRole.DATA_ROLE)));
} }
for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) { for (int i = numSharedDedicatedClusterManagerNodes; i < numSharedDedicatedClusterManagerNodes + numSharedDataNodes; i++) {
final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinMasterNodes); final Settings nodeSettings = getNodeSettings(i, sharedNodesSeeds[i], Settings.EMPTY, defaultMinClusterManagerNodes);
if (numSharedDedicatedMasterNodes > 0) { if (numSharedDedicatedClusterManagerNodes > 0) {
settings.add(removeRoles(nodeSettings, Collections.singleton(DiscoveryNodeRole.MASTER_ROLE))); settings.add(removeRoles(nodeSettings, Collections.singleton(DiscoveryNodeRole.MASTER_ROLE)));
} else { } else {
// if we don't have dedicated master nodes, keep things default // if we don't have dedicated cluster-manager nodes, keep things default
settings.add(nodeSettings); settings.add(nodeSettings);
} }
} }
for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes for (int i = numSharedDedicatedClusterManagerNodes + numSharedDataNodes; i < numSharedDedicatedClusterManagerNodes
+ numSharedCoordOnlyNodes; i++) { + numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
final Builder extraSettings = Settings.builder().put(noRoles()); final Builder extraSettings = Settings.builder().put(noRoles());
settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinClusterManagerNodes));
} }
int autoBootstrapMasterNodeIndex = -1; int autoBootstrapClusterManagerNodeIndex = -1;
final List<String> masterNodeNames = settings.stream() final List<String> clusterManagerNodeNames = settings.stream()
.filter(DiscoveryNode::isMasterNode) .filter(DiscoveryNode::isMasterNode)
.map(Node.NODE_NAME_SETTING::get) .map(Node.NODE_NAME_SETTING::get)
.collect(Collectors.toList()); .collect(Collectors.toList());
if (prevNodeCount == 0 && autoManageMasterNodes) { if (prevNodeCount == 0 && autoManageClusterManagerNodes) {
if (numSharedDedicatedMasterNodes > 0) { if (numSharedDedicatedClusterManagerNodes > 0) {
autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1); autoBootstrapClusterManagerNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedClusterManagerNodes - 1);
} else if (numSharedDataNodes > 0) { } else if (numSharedDataNodes > 0) {
autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1); autoBootstrapClusterManagerNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1);
} }
} }
final List<Settings> updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings); final List<Settings> updatedSettings = bootstrapClusterManagerNodeWithSpecifiedIndex(settings);
for (int i = 0; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) { for (int i = 0; i < numSharedDedicatedClusterManagerNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
Settings nodeSettings = updatedSettings.get(i); Settings nodeSettings = updatedSettings.get(i);
if (i == autoBootstrapMasterNodeIndex) { if (i == autoBootstrapClusterManagerNodeIndex) {
nodeSettings = Settings.builder() nodeSettings = Settings.builder()
.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), masterNodeNames) .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), clusterManagerNodeNames)
.put(nodeSettings) .put(nodeSettings)
.build(); .build();
} }
@ -1187,7 +1194,7 @@ public final class InternalTestCluster extends TestCluster {
nextNodeId.set(newSize); nextNodeId.set(newSize);
assert size() == newSize; assert size() == newSize;
if (autoManageMasterNodes && newSize > 0) { if (autoManageClusterManagerNodes && newSize > 0) {
validateClusterFormed(); validateClusterFormed();
} }
logger.debug( logger.debug(
@ -1214,11 +1221,11 @@ public final class InternalTestCluster extends TestCluster {
.map(ClusterService::state) .map(ClusterService::state)
.collect(Collectors.toList()); .collect(Collectors.toList());
final String debugString = ", expected nodes: " + expectedNodes + " and actual cluster states " + states; final String debugString = ", expected nodes: " + expectedNodes + " and actual cluster states " + states;
// all nodes have a master // all nodes have a cluster-manager
assertTrue("Missing master" + debugString, states.stream().allMatch(cs -> cs.nodes().getMasterNodeId() != null)); assertTrue("Missing cluster-manager" + debugString, states.stream().allMatch(cs -> cs.nodes().getMasterNodeId() != null));
// all nodes have the same master (in same term) // all nodes have the same cluster-manager (in same term)
assertEquals( assertEquals(
"Not all masters in same term" + debugString, "Not all cluster-managers in same term" + debugString,
1, 1,
states.stream().mapToLong(ClusterState::term).distinct().count() states.stream().mapToLong(ClusterState::term).distinct().count()
); );
@ -1555,11 +1562,11 @@ public final class InternalTestCluster extends TestCluster {
} }
/** /**
* Returns an Iterable to all instances for the given class &gt;T&lt; across all data and master nodes * Returns an Iterable to all instances for the given class &gt;T&lt; across all data and cluster-manager nodes
* in the cluster. * in the cluster.
*/ */
public <T> Iterable<T> getDataOrMasterNodeInstances(Class<T> clazz) { public <T> Iterable<T> getDataOrMasterNodeInstances(Class<T> clazz) {
return getInstances(clazz, DATA_NODE_PREDICATE.or(MASTER_NODE_PREDICATE)); return getInstances(clazz, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE));
} }
private <T> Iterable<T> getInstances(Class<T> clazz, Predicate<NodeAndClient> predicate) { private <T> Iterable<T> getInstances(Class<T> clazz, Predicate<NodeAndClient> predicate) {
@ -1583,7 +1590,7 @@ public final class InternalTestCluster extends TestCluster {
} }
public <T> T getMasterNodeInstance(Class<T> clazz) { public <T> T getMasterNodeInstance(Class<T> clazz) {
return getInstance(clazz, MASTER_NODE_PREDICATE); return getInstance(clazz, CLUSTER_MANAGER_NODE_PREDICATE);
} }
private synchronized <T> T getInstance(Class<T> clazz, Predicate<NodeAndClient> predicate) { private synchronized <T> T getInstance(Class<T> clazz, Predicate<NodeAndClient> predicate) {
@ -1653,13 +1660,13 @@ public final class InternalTestCluster extends TestCluster {
if (nodePrefix.equals(OpenSearchIntegTestCase.SUITE_CLUSTER_NODE_PREFIX) if (nodePrefix.equals(OpenSearchIntegTestCase.SUITE_CLUSTER_NODE_PREFIX)
&& nodeAndClient.nodeAndClientId() < sharedNodesSeeds.length && nodeAndClient.nodeAndClientId() < sharedNodesSeeds.length
&& nodeAndClient.isMasterEligible() && nodeAndClient.isMasterEligible()
&& autoManageMasterNodes && autoManageClusterManagerNodes
&& nodes.values() && nodes.values()
.stream() .stream()
.filter(NodeAndClient::isMasterEligible) .filter(NodeAndClient::isMasterEligible)
.filter(n -> n.nodeAndClientId() < sharedNodesSeeds.length) .filter(n -> n.nodeAndClientId() < sharedNodesSeeds.length)
.count() == 1) { .count() == 1) {
throw new AssertionError("Tried to stop the only master eligible shared node"); throw new AssertionError("Tried to stop the only cluster-manager eligible shared node");
} }
logger.info("Closing filtered random node [{}] ", nodeAndClient.name); logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
stopNodesAndClient(nodeAndClient); stopNodesAndClient(nodeAndClient);
@ -1667,36 +1674,36 @@ public final class InternalTestCluster extends TestCluster {
} }
/** /**
* Stops the current master node forcefully * Stops the current cluster-manager node forcefully
*/ */
public synchronized void stopCurrentMasterNode() throws IOException { public synchronized void stopCurrentMasterNode() throws IOException {
ensureOpen(); ensureOpen();
assert size() > 0; assert size() > 0;
String masterNodeName = getMasterName(); String clusterManagerNodeName = getMasterName();
final NodeAndClient masterNode = nodes.get(masterNodeName); final NodeAndClient clusterManagerNode = nodes.get(clusterManagerNodeName);
assert masterNode != null; assert clusterManagerNode != null;
logger.info("Closing master node [{}] ", masterNodeName); logger.info("Closing cluster-manager node [{}] ", clusterManagerNodeName);
stopNodesAndClient(masterNode); stopNodesAndClient(clusterManagerNode);
} }
/** /**
* Stops any of the current nodes but not the master node. * Stops any of the current nodes but not the cluster-manager node.
*/ */
public synchronized void stopRandomNonMasterNode() throws IOException { public synchronized void stopRandomNonMasterNode() throws IOException {
NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate()); NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate());
if (nodeAndClient != null) { if (nodeAndClient != null) {
logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName()); logger.info("Closing random non cluster-manager node [{}] current cluster-manager [{}] ", nodeAndClient.name, getMasterName());
stopNodesAndClient(nodeAndClient); stopNodesAndClient(nodeAndClient);
} }
} }
private synchronized void startAndPublishNodesAndClients(List<NodeAndClient> nodeAndClients) { private synchronized void startAndPublishNodesAndClients(List<NodeAndClient> nodeAndClients) {
if (nodeAndClients.size() > 0) { if (nodeAndClients.size() > 0) {
final int newMasters = (int) nodeAndClients.stream() final int newClusterManagers = (int) nodeAndClients.stream()
.filter(NodeAndClient::isMasterEligible) .filter(NodeAndClient::isMasterEligible)
.filter(nac -> nodes.containsKey(nac.name) == false) // filter out old masters .filter(nac -> nodes.containsKey(nac.name) == false) // filter out old cluster-managers
.count(); .count();
final int currentMasters = getMasterNodesCount(); final int currentClusterManagers = getClusterManagerNodesCount();
rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start
List<Future<?>> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList()); List<Future<?>> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList());
@ -1713,11 +1720,11 @@ public final class InternalTestCluster extends TestCluster {
} }
nodeAndClients.forEach(this::publishNode); nodeAndClients.forEach(this::publishNode);
if (autoManageMasterNodes if (autoManageClusterManagerNodes
&& currentMasters > 0 && currentClusterManagers > 0
&& newMasters > 0 && newClusterManagers > 0
&& getMinMasterNodes(currentMasters + newMasters) > currentMasters) { && getMinClusterManagerNodes(currentClusterManagers + newClusterManagers) > currentClusterManagers) {
// update once masters have joined // update once cluster-managers have joined
validateClusterFormed(); validateClusterFormed();
} }
} }
@ -1758,7 +1765,7 @@ public final class InternalTestCluster extends TestCluster {
} }
private synchronized void stopNodesAndClients(Collection<NodeAndClient> nodeAndClients) throws IOException { private synchronized void stopNodesAndClients(Collection<NodeAndClient> nodeAndClients) throws IOException {
final Set<String> excludedNodeIds = excludeMasters(nodeAndClients); final Set<String> excludedNodeIds = excludeClusterManagers(nodeAndClients);
for (NodeAndClient nodeAndClient : nodeAndClients) { for (NodeAndClient nodeAndClient : nodeAndClients) {
removeDisruptionSchemeFromNode(nodeAndClient); removeDisruptionSchemeFromNode(nodeAndClient);
@ -1834,11 +1841,11 @@ public final class InternalTestCluster extends TestCluster {
activeDisruptionScheme.removeFromNode(nodeAndClient.name, this); activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
} }
Set<String> excludedNodeIds = excludeMasters(Collections.singleton(nodeAndClient)); Set<String> excludedNodeIds = excludeClusterManagers(Collections.singleton(nodeAndClient));
final Settings newSettings = nodeAndClient.closeForRestart( final Settings newSettings = nodeAndClient.closeForRestart(
callback, callback,
autoManageMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1 autoManageClusterManagerNodes ? getMinClusterManagerNodes(getClusterManagerNodesCount()) : -1
); );
removeExclusions(excludedNodeIds); removeExclusions(excludedNodeIds);
@ -1862,22 +1869,23 @@ public final class InternalTestCluster extends TestCluster {
return previous; return previous;
} }
private Set<String> excludeMasters(Collection<NodeAndClient> nodeAndClients) { private Set<String> excludeClusterManagers(Collection<NodeAndClient> nodeAndClients) {
assert Thread.holdsLock(this); assert Thread.holdsLock(this);
final Set<String> excludedNodeNames = new HashSet<>(); final Set<String> excludedNodeNames = new HashSet<>();
if (autoManageMasterNodes && nodeAndClients.size() > 0) { if (autoManageClusterManagerNodes && nodeAndClients.size() > 0) {
final long currentMasters = nodes.values().stream().filter(NodeAndClient::isMasterEligible).count(); final long currentClusterManagers = nodes.values().stream().filter(NodeAndClient::isMasterEligible).count();
final long stoppingMasters = nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).count(); final long stoppingClusterManagers = nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).count();
assert stoppingMasters <= currentMasters : currentMasters + " < " + stoppingMasters; assert stoppingClusterManagers <= currentClusterManagers : currentClusterManagers + " < " + stoppingClusterManagers;
if (stoppingMasters != currentMasters && stoppingMasters > 0) { if (stoppingClusterManagers != currentClusterManagers && stoppingClusterManagers > 0) {
// If stopping few enough master-nodes that there's still a majority left, there is no need to withdraw their votes first. // If stopping few enough cluster-manager-nodes that there's still a majority left, there is no need to withdraw their votes
// first.
// However, we do not yet have a way to be sure there's a majority left, because the voting configuration may not yet have // However, we do not yet have a way to be sure there's a majority left, because the voting configuration may not yet have
// been updated when the previous nodes shut down, so we must always explicitly withdraw votes. // been updated when the previous nodes shut down, so we must always explicitly withdraw votes.
// TODO add cluster health API to check that voting configuration is optimal so this isn't always needed // TODO add cluster health API to check that voting configuration is optimal so this isn't always needed
nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).map(NodeAndClient::getName).forEach(excludedNodeNames::add); nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).map(NodeAndClient::getName).forEach(excludedNodeNames::add);
assert excludedNodeNames.size() == stoppingMasters; assert excludedNodeNames.size() == stoppingClusterManagers;
logger.info("adding voting config exclusions {} prior to restart/shutdown", excludedNodeNames); logger.info("adding voting config exclusions {} prior to restart/shutdown", excludedNodeNames);
try { try {
@ -1912,7 +1920,7 @@ public final class InternalTestCluster extends TestCluster {
public synchronized void fullRestart(RestartCallback callback) throws Exception { public synchronized void fullRestart(RestartCallback callback) throws Exception {
int numNodesRestarted = 0; int numNodesRestarted = 0;
final Settings[] newNodeSettings = new Settings[nextNodeId.get()]; final Settings[] newNodeSettings = new Settings[nextNodeId.get()];
final int minMasterNodes = autoManageMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1; final int minClusterManagerNodes = autoManageClusterManagerNodes ? getMinClusterManagerNodes(getClusterManagerNodesCount()) : -1;
final List<NodeAndClient> toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go final List<NodeAndClient> toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go
for (NodeAndClient nodeAndClient : nodes.values()) { for (NodeAndClient nodeAndClient : nodes.values()) {
callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient()); callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
@ -1920,7 +1928,7 @@ public final class InternalTestCluster extends TestCluster {
if (activeDisruptionScheme != null) { if (activeDisruptionScheme != null) {
activeDisruptionScheme.removeFromNode(nodeAndClient.name, this); activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
} }
final Settings newSettings = nodeAndClient.closeForRestart(callback, minMasterNodes); final Settings newSettings = nodeAndClient.closeForRestart(callback, minClusterManagerNodes);
newNodeSettings[nodeAndClient.nodeAndClientId()] = newSettings; newNodeSettings[nodeAndClient.nodeAndClientId()] = newSettings;
toStartAndPublish.add(nodeAndClient); toStartAndPublish.add(nodeAndClient);
} }
@ -1943,14 +1951,14 @@ public final class InternalTestCluster extends TestCluster {
} }
/** /**
* Returns the name of the current master node in the cluster. * Returns the name of the current cluster-manager node in the cluster.
*/ */
public String getMasterName() { public String getMasterName() {
return getMasterName(null); return getMasterName(null);
} }
/** /**
* Returns the name of the current master node in the cluster and executes the request via the node specified * Returns the name of the current cluster-manager node in the cluster and executes the request via the node specified
* in the viaNode parameter. If viaNode isn't specified a random node will be picked to the send the request to. * in the viaNode parameter. If viaNode isn't specified a random node will be picked to the send the request to.
*/ */
public String getMasterName(@Nullable String viaNode) { public String getMasterName(@Nullable String viaNode) {
@ -1959,7 +1967,7 @@ public final class InternalTestCluster extends TestCluster {
return client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(); return client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName();
} catch (Exception e) { } catch (Exception e) {
logger.warn("Can't fetch cluster state", e); logger.warn("Can't fetch cluster state", e);
throw new RuntimeException("Can't get master node " + e.getMessage(), e); throw new RuntimeException("Can't get cluster-manager node " + e.getMessage(), e);
} }
} }
@ -1999,14 +2007,14 @@ public final class InternalTestCluster extends TestCluster {
} }
/** /**
* Performs cluster bootstrap when node with index {@link #bootstrapMasterNodeIndex} is started * Performs cluster bootstrap when node with index {@link #bootstrapClusterManagerNodeIndex} is started
* with the names of all existing and new cluster-manager-eligible nodes. * with the names of all existing and new cluster-manager-eligible nodes.
* Indexing starts from 0. * Indexing starts from 0.
* If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing. * If {@link #bootstrapClusterManagerNodeIndex} is -1 (default), this method does nothing.
*/ */
private List<Settings> bootstrapMasterNodeWithSpecifiedIndex(List<Settings> allNodesSettings) { private List<Settings> bootstrapClusterManagerNodeWithSpecifiedIndex(List<Settings> allNodesSettings) {
assert Thread.holdsLock(this); assert Thread.holdsLock(this);
if (bootstrapMasterNodeIndex == -1) { // fast-path if (bootstrapClusterManagerNodeIndex == -1) { // fast-path
return allNodesSettings; return allNodesSettings;
} }
@ -2018,7 +2026,7 @@ public final class InternalTestCluster extends TestCluster {
newSettings.add(settings); newSettings.add(settings);
} else { } else {
currentNodeId++; currentNodeId++;
if (currentNodeId != bootstrapMasterNodeIndex) { if (currentNodeId != bootstrapClusterManagerNodeIndex) {
newSettings.add(settings); newSettings.add(settings);
} else { } else {
List<String> nodeNames = new ArrayList<>(); List<String> nodeNames = new ArrayList<>();
@ -2042,7 +2050,7 @@ public final class InternalTestCluster extends TestCluster {
.build() .build()
); );
setBootstrapMasterNodeIndex(-1); setBootstrapClusterManagerNodeIndex(-1);
} }
} }
} }
@ -2089,46 +2097,46 @@ public final class InternalTestCluster extends TestCluster {
* Starts multiple nodes with the given settings and returns their names * Starts multiple nodes with the given settings and returns their names
*/ */
public synchronized List<String> startNodes(Settings... extraSettings) { public synchronized List<String> startNodes(Settings... extraSettings) {
final int newMasterCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isMasterNode).count()); final int newClusterManagerCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isMasterNode).count());
final int defaultMinMasterNodes; final int defaultMinClusterManagerNodes;
if (autoManageMasterNodes) { if (autoManageClusterManagerNodes) {
defaultMinMasterNodes = getMinMasterNodes(getMasterNodesCount() + newMasterCount); defaultMinClusterManagerNodes = getMinClusterManagerNodes(getClusterManagerNodesCount() + newClusterManagerCount);
} else { } else {
defaultMinMasterNodes = -1; defaultMinClusterManagerNodes = -1;
} }
final List<NodeAndClient> nodes = new ArrayList<>(); final List<NodeAndClient> nodes = new ArrayList<>();
final int prevMasterCount = getMasterNodesCount(); final int prevClusterManagerCount = getClusterManagerNodesCount();
int autoBootstrapMasterNodeIndex = autoManageMasterNodes int autoBootstrapClusterManagerNodeIndex = autoManageClusterManagerNodes
&& prevMasterCount == 0 && prevClusterManagerCount == 0
&& newMasterCount > 0 && newClusterManagerCount > 0
&& Arrays.stream(extraSettings) && Arrays.stream(extraSettings)
.allMatch(s -> DiscoveryNode.isMasterNode(s) == false || ZEN2_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(s))) .allMatch(s -> DiscoveryNode.isMasterNode(s) == false || ZEN2_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(s)))
? RandomNumbers.randomIntBetween(random, 0, newMasterCount - 1) ? RandomNumbers.randomIntBetween(random, 0, newClusterManagerCount - 1)
: -1; : -1;
final int numOfNodes = extraSettings.length; final int numOfNodes = extraSettings.length;
final int firstNodeId = nextNodeId.getAndIncrement(); final int firstNodeId = nextNodeId.getAndIncrement();
final List<Settings> settings = new ArrayList<>(); final List<Settings> settings = new ArrayList<>();
for (int i = 0; i < numOfNodes; i++) { for (int i = 0; i < numOfNodes; i++) {
settings.add(getNodeSettings(firstNodeId + i, random.nextLong(), extraSettings[i], defaultMinMasterNodes)); settings.add(getNodeSettings(firstNodeId + i, random.nextLong(), extraSettings[i], defaultMinClusterManagerNodes));
} }
nextNodeId.set(firstNodeId + numOfNodes); nextNodeId.set(firstNodeId + numOfNodes);
final List<String> initialMasterNodes = settings.stream() final List<String> initialClusterManagerNodes = settings.stream()
.filter(DiscoveryNode::isMasterNode) .filter(DiscoveryNode::isMasterNode)
.map(Node.NODE_NAME_SETTING::get) .map(Node.NODE_NAME_SETTING::get)
.collect(Collectors.toList()); .collect(Collectors.toList());
final List<Settings> updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings); final List<Settings> updatedSettings = bootstrapClusterManagerNodeWithSpecifiedIndex(settings);
for (int i = 0; i < numOfNodes; i++) { for (int i = 0; i < numOfNodes; i++) {
final Settings nodeSettings = updatedSettings.get(i); final Settings nodeSettings = updatedSettings.get(i);
final Builder builder = Settings.builder(); final Builder builder = Settings.builder();
if (DiscoveryNode.isMasterNode(nodeSettings)) { if (DiscoveryNode.isMasterNode(nodeSettings)) {
if (autoBootstrapMasterNodeIndex == 0) { if (autoBootstrapClusterManagerNodeIndex == 0) {
builder.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), initialMasterNodes); builder.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), initialClusterManagerNodes);
} }
autoBootstrapMasterNodeIndex -= 1; autoBootstrapClusterManagerNodeIndex -= 1;
} }
final NodeAndClient nodeAndClient = buildNode( final NodeAndClient nodeAndClient = buildNode(
@ -2140,7 +2148,7 @@ public final class InternalTestCluster extends TestCluster {
nodes.add(nodeAndClient); nodes.add(nodeAndClient);
} }
startAndPublishNodesAndClients(nodes); startAndPublishNodesAndClients(nodes);
if (autoManageMasterNodes) { if (autoManageClusterManagerNodes) {
validateClusterFormed(); validateClusterFormed();
} }
return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList()); return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList());
@ -2162,21 +2170,21 @@ public final class InternalTestCluster extends TestCluster {
return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build()); return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.DATA_ROLE)).build());
} }
/** calculates a min master nodes value based on the given number of master nodes */ /** calculates a min cluster-manager nodes value based on the given number of cluster-manager nodes */
private static int getMinMasterNodes(int eligibleMasterNodes) { private static int getMinClusterManagerNodes(int eligibleClusterManagerNodes) {
return eligibleMasterNodes / 2 + 1; return eligibleClusterManagerNodes / 2 + 1;
} }
private int getMasterNodesCount() { private int getClusterManagerNodesCount() {
return (int) nodes.values().stream().filter(n -> DiscoveryNode.isMasterNode(n.node().settings())).count(); return (int) nodes.values().stream().filter(n -> DiscoveryNode.isMasterNode(n.node().settings())).count();
} }
public String startMasterOnlyNode() { public String startClusterManagerOnlyNode() {
return startMasterOnlyNode(Settings.EMPTY); return startClusterManagerOnlyNode(Settings.EMPTY);
} }
public String startMasterOnlyNode(Settings settings) { public String startClusterManagerOnlyNode(Settings settings) {
Settings settings1 = Settings.builder().put(settings).put(masterOnlyNode(settings)).build(); Settings settings1 = Settings.builder().put(settings).put(NodeRoles.clusterManagerOnlyNode(settings)).build();
return startNode(settings1); return startNode(settings1);
} }
@ -2208,7 +2216,7 @@ public final class InternalTestCluster extends TestCluster {
@Override @Override
public int numDataAndMasterNodes() { public int numDataAndMasterNodes() {
return filterNodes(nodes, DATA_NODE_PREDICATE.or(MASTER_NODE_PREDICATE)).size(); return filterNodes(nodes, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE)).size();
} }
public int numMasterNodes() { public int numMasterNodes() {

View File

@ -176,11 +176,11 @@ public class NodeRoles {
return addRoles(settings, Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); return addRoles(settings, Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE));
} }
public static Settings masterOnlyNode() { public static Settings clusterManagerOnlyNode() {
return masterOnlyNode(Settings.EMPTY); return clusterManagerOnlyNode(Settings.EMPTY);
} }
public static Settings masterOnlyNode(final Settings settings) { public static Settings clusterManagerOnlyNode(final Settings settings) {
return onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); return onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
} }

View File

@ -707,17 +707,19 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
} }
/** /**
* Creates a disruption that isolates the current master node from all other nodes in the cluster. * Creates a disruption that isolates the current cluster-manager node from all other nodes in the cluster.
* *
* @param disruptionType type of disruption to create * @param disruptionType type of disruption to create
* @return disruption * @return disruption
*/ */
protected static NetworkDisruption isolateMasterDisruption(NetworkDisruption.NetworkLinkDisruptionType disruptionType) { protected static NetworkDisruption isolateMasterDisruption(NetworkDisruption.NetworkLinkDisruptionType disruptionType) {
final String masterNode = internalCluster().getMasterName(); final String clusterManagerNode = internalCluster().getMasterName();
return new NetworkDisruption( return new NetworkDisruption(
new NetworkDisruption.TwoPartitions( new NetworkDisruption.TwoPartitions(
Collections.singleton(masterNode), Collections.singleton(clusterManagerNode),
Arrays.stream(internalCluster().getNodeNames()).filter(name -> name.equals(masterNode) == false).collect(Collectors.toSet()) Arrays.stream(internalCluster().getNodeNames())
.filter(name -> name.equals(clusterManagerNode) == false)
.collect(Collectors.toSet())
), ),
disruptionType disruptionType
); );
@ -949,12 +951,13 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
.waitForNoRelocatingShards(true) .waitForNoRelocatingShards(true)
.waitForNoInitializingShards(waitForNoInitializingShards) .waitForNoInitializingShards(waitForNoInitializingShards)
// We currently often use ensureGreen or ensureYellow to check whether the cluster is back in a good state after shutting down // We currently often use ensureGreen or ensureYellow to check whether the cluster is back in a good state after shutting down
// a node. If the node that is stopped is the master node, another node will become master and publish a cluster state where it // a node. If the node that is stopped is the cluster-manager node, another node will become cluster-manager and publish a
// is master but where the node that was stopped hasn't been removed yet from the cluster state. It will only subsequently // cluster state where it is cluster-manager but where the node that was stopped hasn't been removed yet from the cluster state.
// publish a second state where the old master is removed. If the ensureGreen/ensureYellow is timed just right, it will get to // It will only subsequently publish a second state where the old cluster-manager is removed.
// execute before the second cluster state update removes the old master and the condition ensureGreen / ensureYellow will // If the ensureGreen/ensureYellow is timed just right, it will get to execute before the second cluster state update removes
// trivially hold if it held before the node was shut down. The following "waitForNodes" condition ensures that the node has // the old cluster-manager and the condition ensureGreen / ensureYellow will trivially hold if it held before the node was
// been removed by the master so that the health check applies to the set of nodes we expect to be part of the cluster. // shut down. The following "waitForNodes" condition ensures that the node has been removed by the cluster-manager
// so that the health check applies to the set of nodes we expect to be part of the cluster.
.waitForNodes(Integer.toString(cluster().size())); .waitForNodes(Integer.toString(cluster().size()));
ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet(); ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet();
@ -1085,19 +1088,19 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
} }
/** /**
* Verifies that all nodes that have the same version of the cluster state as master have same cluster state * Verifies that all nodes that have the same version of the cluster state as cluster-manager have same cluster state
*/ */
protected void ensureClusterStateConsistency() throws IOException { protected void ensureClusterStateConsistency() throws IOException {
if (cluster() != null && cluster().size() > 0) { if (cluster() != null && cluster().size() > 0) {
final NamedWriteableRegistry namedWriteableRegistry = cluster().getNamedWriteableRegistry(); final NamedWriteableRegistry namedWriteableRegistry = cluster().getNamedWriteableRegistry();
final Client masterClient = client(); final Client clusterManagerClient = client();
ClusterState masterClusterState = masterClient.admin().cluster().prepareState().all().get().getState(); ClusterState clusterManagerClusterState = clusterManagerClient.admin().cluster().prepareState().all().get().getState();
byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(clusterManagerClusterState);
// remove local node reference // remove local node reference
masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry); clusterManagerClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry);
Map<String, Object> masterStateMap = convertToMap(masterClusterState); Map<String, Object> clusterManagerStateMap = convertToMap(clusterManagerClusterState);
int masterClusterStateSize = masterClusterState.toString().length(); int clusterManagerClusterStateSize = clusterManagerClusterState.toString().length();
String masterId = masterClusterState.nodes().getMasterNodeId(); String clusterManagerId = clusterManagerClusterState.nodes().getMasterNodeId();
for (Client client : cluster().getClients()) { for (Client client : cluster().getClients()) {
ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState();
byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState);
@ -1105,27 +1108,32 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null, namedWriteableRegistry); localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null, namedWriteableRegistry);
final Map<String, Object> localStateMap = convertToMap(localClusterState); final Map<String, Object> localStateMap = convertToMap(localClusterState);
final int localClusterStateSize = localClusterState.toString().length(); final int localClusterStateSize = localClusterState.toString().length();
// Check that the non-master node has the same version of the cluster state as the master and // Check that the non-cluster-manager node has the same version of the cluster state as the cluster-manager and
// that the master node matches the master (otherwise there is no requirement for the cluster state to match) // that the cluster-manager node matches the cluster-manager (otherwise there is no requirement for the cluster state to
if (masterClusterState.version() == localClusterState.version() // match)
&& masterId.equals(localClusterState.nodes().getMasterNodeId())) { if (clusterManagerClusterState.version() == localClusterState.version()
&& clusterManagerId.equals(localClusterState.nodes().getMasterNodeId())) {
try { try {
assertEquals("cluster state UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID()); assertEquals(
"cluster state UUID does not match",
clusterManagerClusterState.stateUUID(),
localClusterState.stateUUID()
);
// We cannot compare serialization bytes since serialization order of maps is not guaranteed // We cannot compare serialization bytes since serialization order of maps is not guaranteed
// We also cannot compare byte array size because CompressedXContent's DeflateCompressor uses // We also cannot compare byte array size because CompressedXContent's DeflateCompressor uses
// a synced flush that can affect the size of the compressed byte array // a synced flush that can affect the size of the compressed byte array
// (see: DeflateCompressedXContentTests#testDifferentCompressedRepresentation for an example) // (see: DeflateCompressedXContentTests#testDifferentCompressedRepresentation for an example)
// instead we compare the string length of cluster state - they should be the same // instead we compare the string length of cluster state - they should be the same
assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); assertEquals("cluster state size does not match", clusterManagerClusterStateSize, localClusterStateSize);
// Compare JSON serialization // Compare JSON serialization
assertNull( assertNull(
"cluster state JSON serialization does not match", "cluster state JSON serialization does not match",
differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap) differenceBetweenMapsIgnoringArrayOrder(clusterManagerStateMap, localStateMap)
); );
} catch (final AssertionError error) { } catch (final AssertionError error) {
logger.error( logger.error(
"Cluster state from master:\n{}\nLocal cluster state:\n{}", "Cluster state from cluster-manager:\n{}\nLocal cluster state:\n{}",
masterClusterState.toString(), clusterManagerClusterState.toString(),
localClusterState.toString() localClusterState.toString()
); );
throw error; throw error;
@ -1138,8 +1146,8 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
protected void ensureClusterStateCanBeReadByNodeTool() throws IOException { protected void ensureClusterStateCanBeReadByNodeTool() throws IOException {
if (cluster() != null && cluster().size() > 0) { if (cluster() != null && cluster().size() > 0) {
final Client masterClient = client(); final Client clusterManagerClient = client();
Metadata metadata = masterClient.admin().cluster().prepareState().all().get().getState().metadata(); Metadata metadata = clusterManagerClient.admin().cluster().prepareState().all().get().getState().metadata();
final Map<String, String> serializationParams = new HashMap<>(2); final Map<String, String> serializationParams = new HashMap<>(2);
serializationParams.put("binary", "true"); serializationParams.put("binary", "true");
serializationParams.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY); serializationParams.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY);
@ -1320,7 +1328,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
/** /**
* Ensures that all nodes in the cluster are connected to each other. * Ensures that all nodes in the cluster are connected to each other.
* *
* Some network disruptions may leave nodes that are not the master disconnected from each other. * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other.
* {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's
* handy to be able to ensure this happens faster * handy to be able to ensure this happens faster
*/ */
@ -1738,9 +1746,9 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
int maxNumDataNodes() default -1; int maxNumDataNodes() default -1;
/** /**
* Indicates whether the cluster can have dedicated master nodes. If {@code false} means data nodes will serve as master nodes * Indicates whether the cluster can have dedicated cluster-manager nodes. If {@code false} means data nodes will serve as cluster-manager nodes
* and there will be no dedicated master (and data) nodes. Default is {@code false} which means * and there will be no dedicated cluster-manager (and data) nodes. Default is {@code false} which means
* dedicated master nodes will be randomly used. * dedicated cluster-manager nodes will be randomly used.
*/ */
boolean supportsDedicatedMasters() default true; boolean supportsDedicatedMasters() default true;
@ -1829,12 +1837,12 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
return annotation == null ? Scope.SUITE : annotation.scope(); return annotation == null ? Scope.SUITE : annotation.scope();
} }
private boolean getSupportsDedicatedMasters() { private boolean getSupportsDedicatedClusterManagers() {
ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
return annotation == null ? true : annotation.supportsDedicatedMasters(); return annotation == null ? true : annotation.supportsDedicatedMasters();
} }
private boolean getAutoManageMasterNodes() { private boolean getAutoManageClusterManagerNodes() {
ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
return annotation == null ? true : annotation.autoManageMasterNodes(); return annotation == null ? true : annotation.autoManageMasterNodes();
} }
@ -1959,7 +1967,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
throw new OpenSearchException("Scope not supported: " + scope); throw new OpenSearchException("Scope not supported: " + scope);
} }
boolean supportsDedicatedMasters = getSupportsDedicatedMasters(); boolean supportsDedicatedClusterManagers = getSupportsDedicatedClusterManagers();
int numDataNodes = getNumDataNodes(); int numDataNodes = getNumDataNodes();
int minNumDataNodes; int minNumDataNodes;
int maxNumDataNodes; int maxNumDataNodes;
@ -1983,8 +1991,8 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
return new InternalTestCluster( return new InternalTestCluster(
seed, seed,
createTempDir(), createTempDir(),
supportsDedicatedMasters, supportsDedicatedClusterManagers,
getAutoManageMasterNodes(), getAutoManageClusterManagerNodes(),
minNumDataNodes, minNumDataNodes,
maxNumDataNodes, maxNumDataNodes,
InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", InternalTestCluster.clusterName(scope.name(), seed) + "-cluster",
@ -2227,7 +2235,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
// need to check that there are no more in-flight search contexts before // need to check that there are no more in-flight search contexts before
// we remove indices // we remove indices
if (isInternalCluster()) { if (isInternalCluster()) {
internalCluster().setBootstrapMasterNodeIndex(-1); internalCluster().setBootstrapClusterManagerNodeIndex(-1);
} }
super.ensureAllSearchContextsReleased(); super.ensureAllSearchContextsReleased();
if (runTestScopeLifecycle()) { if (runTestScopeLifecycle()) {

View File

@ -97,7 +97,7 @@ public abstract class OpenSearchSingleNodeTestCase extends OpenSearchTestCase {
assert NODE == null; assert NODE == null;
NODE = RandomizedContext.current().runWithPrivateRandomness(seed, this::newNode); NODE = RandomizedContext.current().runWithPrivateRandomness(seed, this::newNode);
// we must wait for the node to actually be up and running. otherwise the node might have started, // we must wait for the node to actually be up and running. otherwise the node might have started,
// elected itself master but might not yet have removed the // elected itself cluster-manager but might not yet have removed the
// SERVICE_UNAVAILABLE/1/state not recovered / initialized block // SERVICE_UNAVAILABLE/1/state not recovered / initialized block
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().get(); ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().get();
assertFalse(clusterHealthResponse.isTimedOut()); assertFalse(clusterHealthResponse.isTimedOut());

View File

@ -125,7 +125,7 @@ public abstract class TestCluster implements Closeable {
public abstract int numDataNodes(); public abstract int numDataNodes();
/** /**
* Returns the number of data and master eligible nodes in the cluster. * Returns the number of data and cluster-manager eligible nodes in the cluster.
*/ */
public abstract int numDataAndMasterNodes(); public abstract int numDataAndMasterNodes();

View File

@ -65,7 +65,7 @@ public class VersionUtils {
Map<Integer, List<Version>> majorVersions = Version.getDeclaredVersions(versionClass) Map<Integer, List<Version>> majorVersions = Version.getDeclaredVersions(versionClass)
.stream() .stream()
.collect(Collectors.groupingBy(v -> (int) v.major)); .collect(Collectors.groupingBy(v -> (int) v.major));
// this breaks b/c 5.x is still in version list but master doesn't care about it! // this breaks b/c 5.x is still in version list but cluster-manager doesn't care about it!
// assert majorVersions.size() == 2; // assert majorVersions.size() == 2;
List<List<Version>> oldVersions = new ArrayList<>(0); List<List<Version>> oldVersions = new ArrayList<>(0);
List<List<Version>> previousMajor = new ArrayList<>(0); List<List<Version>> previousMajor = new ArrayList<>(0);
@ -85,7 +85,7 @@ public class VersionUtils {
List<Version> unreleasedVersions = new ArrayList<>(); List<Version> unreleasedVersions = new ArrayList<>();
final List<List<Version>> stableVersions; final List<List<Version>> stableVersions;
if (currentMajor.size() == 1) { if (currentMajor.size() == 1) {
// on master branch // on main branch
stableVersions = previousMajor; stableVersions = previousMajor;
// remove current // remove current
moveLastToUnreleased(currentMajor, unreleasedVersions); moveLastToUnreleased(currentMajor, unreleasedVersions);

View File

@ -62,7 +62,7 @@ public class BlockMasterServiceOnMaster extends SingleNodeDisruption {
if (clusterService == null) { if (clusterService == null) {
return; return;
} }
logger.info("blocking master service on node [{}]", disruptionNodeCopy); logger.info("blocking cluster-manager service on node [{}]", disruptionNodeCopy);
boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1)); boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
assert success : "startDisrupting called without waiting on stopDisrupting to complete"; assert success : "startDisrupting called without waiting on stopDisrupting to complete";
final CountDownLatch started = new CountDownLatch(1); final CountDownLatch started = new CountDownLatch(1);

View File

@ -61,7 +61,7 @@ public class BusyMasterServiceDisruption extends SingleNodeDisruption {
if (clusterService == null) { if (clusterService == null) {
return; return;
} }
logger.info("making master service busy on node [{}] at priority [{}]", disruptionNodeCopy, priority); logger.info("making cluster-manager service busy on node [{}] at priority [{}]", disruptionNodeCopy, priority);
active.set(true); active.set(true);
submitTask(clusterService); submitTask(clusterService);
} }

View File

@ -109,7 +109,7 @@ public class NetworkDisruption implements ServiceDisruptionScheme {
/** /**
* Ensures that all nodes in the cluster are connected to each other. * Ensures that all nodes in the cluster are connected to each other.
* *
* Some network disruptions may leave nodes that are not the master disconnected from each other. * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other.
* {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's
* handy to be able to ensure this happens faster * handy to be able to ensure this happens faster
*/ */

View File

@ -62,10 +62,10 @@ public final class ClientYamlDocsTestClient extends ClientYamlTestClient {
final RestClient restClient, final RestClient restClient,
final List<HttpHost> hosts, final List<HttpHost> hosts,
final Version esVersion, final Version esVersion,
final Version masterVersion, final Version clusterManagerVersion,
final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes
) { ) {
super(restSpec, restClient, hosts, esVersion, masterVersion, clientBuilderWithSniffedNodes); super(restSpec, restClient, hosts, esVersion, clusterManagerVersion, clientBuilderWithSniffedNodes);
} }
@Override @Override

View File

@ -78,7 +78,7 @@ public class ClientYamlTestClient implements Closeable {
private final ClientYamlSuiteRestSpec restSpec; private final ClientYamlSuiteRestSpec restSpec;
private final Map<NodeSelector, RestClient> restClients = new HashMap<>(); private final Map<NodeSelector, RestClient> restClients = new HashMap<>();
private final Version esVersion; private final Version esVersion;
private final Version masterVersion; private final Version clusterManagerVersion;
private final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes; private final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes;
ClientYamlTestClient( ClientYamlTestClient(
@ -86,14 +86,14 @@ public class ClientYamlTestClient implements Closeable {
final RestClient restClient, final RestClient restClient,
final List<HttpHost> hosts, final List<HttpHost> hosts,
final Version esVersion, final Version esVersion,
final Version masterVersion, final Version clusterManagerVersion,
final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes final CheckedSupplier<RestClientBuilder, IOException> clientBuilderWithSniffedNodes
) { ) {
assert hosts.size() > 0; assert hosts.size() > 0;
this.restSpec = restSpec; this.restSpec = restSpec;
this.restClients.put(NodeSelector.ANY, restClient); this.restClients.put(NodeSelector.ANY, restClient);
this.esVersion = esVersion; this.esVersion = esVersion;
this.masterVersion = masterVersion; this.clusterManagerVersion = clusterManagerVersion;
this.clientBuilderWithSniffedNodes = clientBuilderWithSniffedNodes; this.clientBuilderWithSniffedNodes = clientBuilderWithSniffedNodes;
} }
@ -101,8 +101,8 @@ public class ClientYamlTestClient implements Closeable {
return esVersion; return esVersion;
} }
public Version getMasterVersion() { public Version getClusterManagerVersion() {
return masterVersion; return clusterManagerVersion;
} }
/** /**

View File

@ -228,7 +228,7 @@ public class ClientYamlTestExecutionContext {
} }
public Version masterVersion() { public Version masterVersion() {
return clientYamlTestClient.getMasterVersion(); return clientYamlTestClient.getClusterManagerVersion();
} }
} }

View File

@ -144,14 +144,14 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
final List<HttpHost> hosts = getClusterHosts(); final List<HttpHost> hosts = getClusterHosts();
Tuple<Version, Version> versionVersionTuple = readVersionsFromCatNodes(adminClient()); Tuple<Version, Version> versionVersionTuple = readVersionsFromCatNodes(adminClient());
final Version minVersion = versionVersionTuple.v1(); final Version minVersion = versionVersionTuple.v1();
final Version masterVersion = versionVersionTuple.v2(); final Version clusterManagerVersion = versionVersionTuple.v2();
logger.info( logger.info(
"initializing client, minimum OpenSearch version [{}], master version, [{}], hosts {}", "initializing client, minimum OpenSearch version [{}], cluster-manager version, [{}], hosts {}",
minVersion, minVersion,
masterVersion, clusterManagerVersion,
hosts hosts
); );
clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, minVersion, masterVersion); clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, minVersion, clusterManagerVersion);
restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, randomizeContentType()); restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, randomizeContentType());
adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, false); adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, false);
final String[] denylist = resolvePathsProperty(REST_TESTS_DENYLIST, null); final String[] denylist = resolvePathsProperty(REST_TESTS_DENYLIST, null);
@ -179,9 +179,16 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
final RestClient restClient, final RestClient restClient,
final List<HttpHost> hosts, final List<HttpHost> hosts,
final Version esVersion, final Version esVersion,
final Version masterVersion final Version clusterManagerVersion
) { ) {
return new ClientYamlTestClient(restSpec, restClient, hosts, esVersion, masterVersion, this::getClientBuilderWithSniffedHosts); return new ClientYamlTestClient(
restSpec,
restClient,
hosts,
esVersion,
clusterManagerVersion,
this::getClientBuilderWithSniffedHosts
);
} }
@AfterClass @AfterClass
@ -330,28 +337,28 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
* Detect minimal node version and master node version of cluster using REST Client. * Detect minimal node version and master node version of cluster using REST Client.
* *
* @param restClient REST client used to discover cluster nodes * @param restClient REST client used to discover cluster nodes
* @return {@link Tuple} of [minimal node version, master node version] * @return {@link Tuple} of [minimal node version, cluster-manager node version]
* @throws IOException When _cat API output parsing fails * @throws IOException When _cat API output parsing fails
*/ */
private Tuple<Version, Version> readVersionsFromCatNodes(RestClient restClient) throws IOException { private Tuple<Version, Version> readVersionsFromCatNodes(RestClient restClient) throws IOException {
// we simply go to the _cat/nodes API and parse all versions in the cluster // we simply go to the _cat/nodes API and parse all versions in the cluster
final Request request = new Request("GET", "/_cat/nodes"); final Request request = new Request("GET", "/_cat/nodes");
request.addParameter("h", "version,master"); request.addParameter("h", "version,master");
request.setOptions(getCatNodesVersionMasterRequestOptions()); request.setOptions(getCatNodesVersionClusterManagerRequestOptions());
Response response = restClient.performRequest(request); Response response = restClient.performRequest(request);
ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response);
String nodesCatResponse = restTestResponse.getBodyAsString(); String nodesCatResponse = restTestResponse.getBodyAsString();
String[] split = nodesCatResponse.split("\n"); String[] split = nodesCatResponse.split("\n");
Version version = null; Version version = null;
Version masterVersion = null; Version clusterManagerVersion = null;
for (String perNode : split) { for (String perNode : split) {
final String[] versionAndMaster = perNode.split("\\s+"); final String[] versionAndClusterManager = perNode.split("\\s+");
assert versionAndMaster.length == 2 : "invalid line: " + perNode + " length: " + versionAndMaster.length; assert versionAndClusterManager.length == 2 : "invalid line: " + perNode + " length: " + versionAndClusterManager.length;
final Version currentVersion = Version.fromString(versionAndMaster[0]); final Version currentVersion = Version.fromString(versionAndClusterManager[0]);
final boolean master = versionAndMaster[1].trim().equals("*"); final boolean clusterManager = versionAndClusterManager[1].trim().equals("*");
if (master) { if (clusterManager) {
assert masterVersion == null; assert clusterManagerVersion == null;
masterVersion = currentVersion; clusterManagerVersion = currentVersion;
} }
if (version == null) { if (version == null) {
version = currentVersion; version = currentVersion;
@ -359,10 +366,10 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe
version = currentVersion; version = currentVersion;
} }
} }
return new Tuple<>(version, masterVersion); return new Tuple<>(version, clusterManagerVersion);
} }
protected RequestOptions getCatNodesVersionMasterRequestOptions() { protected RequestOptions getCatNodesVersionClusterManagerRequestOptions() {
return RequestOptions.DEFAULT; return RequestOptions.DEFAULT;
} }

View File

@ -353,7 +353,7 @@ public class DoSection implements ExecutableSection {
/** /**
* Check that the response contains only the warning headers that we expect. * Check that the response contains only the warning headers that we expect.
*/ */
void checkWarningHeaders(final List<String> warningHeaders, final Version masterVersion) { void checkWarningHeaders(final List<String> warningHeaders, final Version clusterManagerVersion) {
final List<String> unexpected = new ArrayList<>(); final List<String> unexpected = new ArrayList<>();
final List<String> unmatched = new ArrayList<>(); final List<String> unmatched = new ArrayList<>();
final List<String> missing = new ArrayList<>(); final List<String> missing = new ArrayList<>();
@ -367,7 +367,7 @@ public class DoSection implements ExecutableSection {
final boolean matches = matcher.matches(); final boolean matches = matcher.matches();
if (matches) { if (matches) {
final String message = HeaderWarning.extractWarningValueFromWarningHeader(header, true); final String message = HeaderWarning.extractWarningValueFromWarningHeader(header, true);
if (masterVersion.before(LegacyESVersion.V_7_0_0) if (clusterManagerVersion.before(LegacyESVersion.V_7_0_0)
&& message.equals( && message.equals(
"the default number of shards will change from [5] to [1] in 7.0.0; " "the default number of shards will change from [5] to [1] in 7.0.0; "
+ "if you wish to continue using the default of [5] shards, " + "if you wish to continue using the default of [5] shards, "
@ -375,7 +375,7 @@ public class DoSection implements ExecutableSection {
)) { )) {
/* /*
* This warning header will come back in the vast majority of our tests that create an index when running against an * This warning header will come back in the vast majority of our tests that create an index when running against an
* older master. Rather than rewrite our tests to assert this warning header, we assume that it is expected. * older cluster-manager. Rather than rewrite our tests to assert this warning header, we assume that it is expected.
*/ */
continue; continue;
} }

View File

@ -63,7 +63,7 @@ import static org.mockito.Mockito.when;
public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase { public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
public void testFakeMasterService() { public void testFakeClusterManagerService() {
List<Runnable> runnableTasks = new ArrayList<>(); List<Runnable> runnableTasks = new ArrayList<>();
AtomicReference<ClusterState> lastClusterStateRef = new AtomicReference<>(); AtomicReference<ClusterState> lastClusterStateRef = new AtomicReference<>();
DiscoveryNode discoveryNode = new DiscoveryNode( DiscoveryNode discoveryNode = new DiscoveryNode(
@ -84,21 +84,21 @@ public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
doAnswer(invocationOnMock -> runnableTasks.add((Runnable) invocationOnMock.getArguments()[0])).when(executorService).execute(any()); doAnswer(invocationOnMock -> runnableTasks.add((Runnable) invocationOnMock.getArguments()[0])).when(executorService).execute(any());
when(mockThreadPool.generic()).thenReturn(executorService); when(mockThreadPool.generic()).thenReturn(executorService);
FakeThreadPoolMasterService masterService = new FakeThreadPoolMasterService( FakeThreadPoolMasterService clusterManagerService = new FakeThreadPoolMasterService(
"test_node", "test_node",
"test", "test",
mockThreadPool, mockThreadPool,
runnableTasks::add runnableTasks::add
); );
masterService.setClusterStateSupplier(lastClusterStateRef::get); clusterManagerService.setClusterStateSupplier(lastClusterStateRef::get);
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
lastClusterStateRef.set(event.state()); lastClusterStateRef.set(event.state());
publishingCallback.set(publishListener); publishingCallback.set(publishListener);
}); });
masterService.start(); clusterManagerService.start();
AtomicBoolean firstTaskCompleted = new AtomicBoolean(); AtomicBoolean firstTaskCompleted = new AtomicBoolean();
masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState) return ClusterState.builder(currentState)
@ -124,7 +124,7 @@ public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
assertFalse(firstTaskCompleted.get()); assertFalse(firstTaskCompleted.get());
final Runnable scheduleTask = runnableTasks.remove(0); final Runnable scheduleTask = runnableTasks.remove(0);
assertThat(scheduleTask, hasToString("master service scheduling next task")); assertThat(scheduleTask, hasToString("cluster-manager service scheduling next task"));
scheduleTask.run(); scheduleTask.run();
final Runnable publishTask = runnableTasks.remove(0); final Runnable publishTask = runnableTasks.remove(0);
@ -138,7 +138,7 @@ public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase {
assertThat(runnableTasks.size(), equalTo(0)); assertThat(runnableTasks.size(), equalTo(0));
AtomicBoolean secondTaskCompleted = new AtomicBoolean(); AtomicBoolean secondTaskCompleted = new AtomicBoolean();
masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState) return ClusterState.builder(currentState)

View File

@ -76,11 +76,11 @@ public class NetworkDisruptionIT extends OpenSearchIntegTestCase {
/** /**
* Creates 3 to 5 mixed-node cluster and splits it into 2 parts. * Creates 3 to 5 mixed-node cluster and splits it into 2 parts.
* The first part is guaranteed to have at least the majority of the nodes, * The first part is guaranteed to have at least the majority of the nodes,
* so that master could be elected on this side. * so that cluster-manager could be elected on this side.
*/ */
private Tuple<Set<String>, Set<String>> prepareDisruptedCluster() { private Tuple<Set<String>, Set<String>> prepareDisruptedCluster() {
int numOfNodes = randomIntBetween(3, 5); int numOfNodes = randomIntBetween(3, 5);
internalCluster().setBootstrapMasterNodeIndex(numOfNodes - 1); internalCluster().setBootstrapClusterManagerNodeIndex(numOfNodes - 1);
Set<String> nodes = new HashSet<>(internalCluster().startNodes(numOfNodes, DISRUPTION_TUNED_SETTINGS)); Set<String> nodes = new HashSet<>(internalCluster().startNodes(numOfNodes, DISRUPTION_TUNED_SETTINGS));
ensureGreen(); ensureGreen();
assertThat(nodes.size(), greaterThanOrEqualTo(3)); assertThat(nodes.size(), greaterThanOrEqualTo(3));
@ -127,7 +127,7 @@ public class NetworkDisruptionIT extends OpenSearchIntegTestCase {
} }
public void testTransportRespondsEventually() throws InterruptedException { public void testTransportRespondsEventually() throws InterruptedException {
internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().setBootstrapClusterManagerNodeIndex(0);
internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(3, 5)); internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(3, 5));
final NetworkDisruption.DisruptedLinks disruptedLinks; final NetworkDisruption.DisruptedLinks disruptedLinks;
if (randomBoolean()) { if (randomBoolean()) {

View File

@ -79,7 +79,7 @@ public class InternalTestClusterIT extends OpenSearchIntegTestCase {
} }
public void testOperationsDuringRestart() throws Exception { public void testOperationsDuringRestart() throws Exception {
internalCluster().startMasterOnlyNode(); internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNodes(2); internalCluster().startDataOnlyNodes(2);
internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
@Override @Override

View File

@ -86,7 +86,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
public void testInitializiationIsConsistent() { public void testInitializiationIsConsistent() {
long clusterSeed = randomLong(); long clusterSeed = randomLong();
boolean masterNodes = randomBoolean(); boolean clusterManagerNodes = randomBoolean();
int minNumDataNodes = randomIntBetween(0, 9); int minNumDataNodes = randomIntBetween(0, 9);
int maxNumDataNodes = randomIntBetween(minNumDataNodes, 10); int maxNumDataNodes = randomIntBetween(minNumDataNodes, 10);
String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
@ -98,7 +98,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster0 = new InternalTestCluster( InternalTestCluster cluster0 = new InternalTestCluster(
clusterSeed, clusterSeed,
baseDir, baseDir,
masterNodes, clusterManagerNodes,
randomBoolean(), randomBoolean(),
minNumDataNodes, minNumDataNodes,
maxNumDataNodes, maxNumDataNodes,
@ -112,7 +112,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster1 = new InternalTestCluster( InternalTestCluster cluster1 = new InternalTestCluster(
clusterSeed, clusterSeed,
baseDir, baseDir,
masterNodes, clusterManagerNodes,
randomBoolean(), randomBoolean(),
minNumDataNodes, minNumDataNodes,
maxNumDataNodes, maxNumDataNodes,
@ -165,23 +165,23 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
} }
public void testBeforeTest() throws Exception { public void testBeforeTest() throws Exception {
final boolean autoManageMinMasterNodes = randomBoolean(); final boolean autoManageMinClusterManagerNodes = randomBoolean();
long clusterSeed = randomLong(); long clusterSeed = randomLong();
final boolean masterNodes; final boolean clusterManagerNodes;
final int minNumDataNodes; final int minNumDataNodes;
final int maxNumDataNodes; final int maxNumDataNodes;
final int bootstrapMasterNodeIndex; final int bootstrapClusterManagerNodeIndex;
if (autoManageMinMasterNodes) { if (autoManageMinClusterManagerNodes) {
masterNodes = randomBoolean(); clusterManagerNodes = randomBoolean();
minNumDataNodes = randomIntBetween(0, 3); minNumDataNodes = randomIntBetween(0, 3);
maxNumDataNodes = randomIntBetween(minNumDataNodes, 4); maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
bootstrapMasterNodeIndex = -1; bootstrapClusterManagerNodeIndex = -1;
} else { } else {
// if we manage min master nodes, we need to lock down the number of nodes // if we manage min cluster-manager nodes, we need to lock down the number of nodes
minNumDataNodes = randomIntBetween(0, 4); minNumDataNodes = randomIntBetween(0, 4);
maxNumDataNodes = minNumDataNodes; maxNumDataNodes = minNumDataNodes;
masterNodes = false; clusterManagerNodes = false;
bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1); bootstrapClusterManagerNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1);
} }
final int numClientNodes = randomIntBetween(0, 2); final int numClientNodes = randomIntBetween(0, 2);
NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
@ -191,9 +191,9 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
.put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
.putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey()) .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType());
if (autoManageMinMasterNodes == false) { if (autoManageMinClusterManagerNodes == false) {
assert minNumDataNodes == maxNumDataNodes; assert minNumDataNodes == maxNumDataNodes;
assert masterNodes == false; assert clusterManagerNodes == false;
} }
return settings.build(); return settings.build();
} }
@ -209,8 +209,8 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster0 = new InternalTestCluster( InternalTestCluster cluster0 = new InternalTestCluster(
clusterSeed, clusterSeed,
createTempDir(), createTempDir(),
masterNodes, clusterManagerNodes,
autoManageMinMasterNodes, autoManageMinClusterManagerNodes,
minNumDataNodes, minNumDataNodes,
maxNumDataNodes, maxNumDataNodes,
"clustername", "clustername",
@ -220,13 +220,13 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
mockPlugins(), mockPlugins(),
Function.identity() Function.identity()
); );
cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); cluster0.setBootstrapClusterManagerNodeIndex(bootstrapClusterManagerNodeIndex);
InternalTestCluster cluster1 = new InternalTestCluster( InternalTestCluster cluster1 = new InternalTestCluster(
clusterSeed, clusterSeed,
createTempDir(), createTempDir(),
masterNodes, clusterManagerNodes,
autoManageMinMasterNodes, autoManageMinClusterManagerNodes,
minNumDataNodes, minNumDataNodes,
maxNumDataNodes, maxNumDataNodes,
"clustername", "clustername",
@ -236,7 +236,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
mockPlugins(), mockPlugins(),
Function.identity() Function.identity()
); );
cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); cluster1.setBootstrapClusterManagerNodeIndex(bootstrapClusterManagerNodeIndex);
assertClusters(cluster0, cluster1, false); assertClusters(cluster0, cluster1, false);
long seed = randomLong(); long seed = randomLong();
@ -265,7 +265,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException { public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException {
long clusterSeed = randomLong(); long clusterSeed = randomLong();
boolean masterNodes = randomBoolean(); boolean clusterManagerNodes = randomBoolean();
// we need one stable node // we need one stable node
final int minNumDataNodes = 2; final int minNumDataNodes = 2;
final int maxNumDataNodes = 2; final int maxNumDataNodes = 2;
@ -291,7 +291,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
InternalTestCluster cluster = new InternalTestCluster( InternalTestCluster cluster = new InternalTestCluster(
clusterSeed, clusterSeed,
baseDir, baseDir,
masterNodes, clusterManagerNodes,
true, true,
minNumDataNodes, minNumDataNodes,
maxNumDataNodes, maxNumDataNodes,
@ -304,13 +304,13 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
); );
try { try {
cluster.beforeTest(random()); cluster.beforeTest(random());
final int originalMasterCount = cluster.numMasterNodes(); final int originalClusterManagerCount = cluster.numMasterNodes();
final Map<String, Path[]> shardNodePaths = new HashMap<>(); final Map<String, Path[]> shardNodePaths = new HashMap<>();
for (String name : cluster.getNodeNames()) { for (String name : cluster.getNodeNames()) {
shardNodePaths.put(name, getNodePaths(cluster, name)); shardNodePaths.put(name, getNodePaths(cluster, name));
} }
String poorNode = randomValueOtherThanMany( String poorNode = randomValueOtherThanMany(
n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()), n -> originalClusterManagerCount == 1 && n.equals(cluster.getMasterName()),
() -> randomFrom(cluster.getNodeNames()) () -> randomFrom(cluster.getNodeNames())
); );
Path dataPath = getNodePaths(cluster, poorNode)[0]; Path dataPath = getNodePaths(cluster, poorNode)[0];
@ -409,7 +409,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
roles.add(role); roles.add(role);
} }
cluster.setBootstrapMasterNodeIndex( cluster.setBootstrapClusterManagerNodeIndex(
randomIntBetween(0, (int) roles.stream().filter(role -> role.equals(clusterManagerRole)).count() - 1) randomIntBetween(0, (int) roles.stream().filter(role -> role.equals(clusterManagerRole)).count() - 1)
); );
@ -419,7 +419,7 @@ public class InternalTestClusterTests extends OpenSearchTestCase {
final DiscoveryNodeRole role = roles.get(i); final DiscoveryNodeRole role = roles.get(i);
final String node; final String node;
if (role == clusterManagerRole) { if (role == clusterManagerRole) {
node = cluster.startMasterOnlyNode(); node = cluster.startClusterManagerOnlyNode();
} else if (role == DiscoveryNodeRole.DATA_ROLE) { } else if (role == DiscoveryNodeRole.DATA_ROLE) {
node = cluster.startDataOnlyNode(); node = cluster.startDataOnlyNode();
} else if (role == DiscoveryNodeRole.INGEST_ROLE) { } else if (role == DiscoveryNodeRole.INGEST_ROLE) {