Replace internal usages of 'master' term in 'server/src/test' directory (#2520)
* Replace the non-inclusive terminology "master" with "cluster manager" in code comments and internal variable/method/class names in the `server/src/test` directory.
* Backwards compatibility is not impacted.
* Add a new unit test `testDeprecatedMasterNodeFilter()` to validate that `master:true` or `master:false` can still filter nodes in the [Cluster Stats](https://opensearch.org/docs/latest/opensearch/rest-api/cluster-stats/) API after the `master` role was deprecated in PR https://github.com/opensearch-project/OpenSearch/pull/2424.

Signed-off-by: Tianli Feng <ftianli@amazon.com>
This commit is contained in:
parent 509978012b
commit 296fa092c2
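For context on the new test: role-based node filters such as `master:true` resolve through `DiscoveryNodes#resolveNodes`, so the behavior being validated can be sketched roughly as follows (an illustrative sketch, not the committed `testDeprecatedMasterNodeFilter()`; node setup and assertions are simplified and assume the usual test-class scope):

```java
// Illustrative sketch only -- not the committed test. After the `master` role
// deprecation, the deprecated "master:true"/"master:false" filters should
// still resolve to the cluster-manager-eligible nodes.
DiscoveryNode clusterManagerNode = new DiscoveryNode(
    "node_cm",
    buildNewFakeTransportAddress(),
    Collections.emptyMap(),
    Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
    Version.CURRENT
);
DiscoveryNode dataNode = new DiscoveryNode(
    "node_data",
    buildNewFakeTransportAddress(),
    Collections.emptyMap(),
    Collections.singleton(DiscoveryNodeRole.DATA_ROLE),
    Version.CURRENT
);
DiscoveryNodes nodes = DiscoveryNodes.builder().add(clusterManagerNode).add(dataNode).build();

// "master:true" keeps matching only the cluster-manager-eligible node...
assertEquals(Set.of("node_cm"), Set.of(nodes.resolveNodes("master:true")));
// ...and "master:false" matches the rest.
assertEquals(Set.of("node_data"), Set.of(nodes.resolveNodes("master:false")));
```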
@@ -386,7 +386,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
         );
 
         if (isolatedNode.equals(nonClusterManagerNode)) {
-            assertNoMaster(nonClusterManagerNode);
+            assertNoClusterManager(nonClusterManagerNode);
         } else {
             ensureStableCluster(2, nonClusterManagerNode);
         }
@@ -91,7 +91,7 @@ public class ClusterManagerDisruptionIT extends AbstractDisruptionTestCase {
 
         logger.info("waiting for nodes to de-elect cluster-manager [{}]", oldClusterManagerNode);
         for (String node : oldNonClusterManagerNodesSet) {
-            assertDifferentMaster(node, oldClusterManagerNode);
+            assertDifferentClusterManager(node, oldClusterManagerNode);
         }
 
         logger.info("waiting for nodes to elect a new cluster-manager");

@@ -107,7 +107,7 @@ public class ClusterManagerDisruptionIT extends AbstractDisruptionTestCase {
         // make sure all nodes agree on cluster-manager
         String newClusterManager = internalCluster().getMasterName();
         assertThat(newClusterManager, not(equalTo(oldClusterManagerNode)));
-        assertMaster(newClusterManager, nodes);
+        assertClusterManager(newClusterManager, nodes);
     }
 
     /**

@@ -137,7 +137,7 @@ public class ClusterManagerDisruptionIT extends AbstractDisruptionTestCase {
         ensureStableCluster(2, nonIsolatedNode);
 
         // make sure isolated need picks up on things.
-        assertNoMaster(isolatedNode, TimeValue.timeValueSeconds(40));
+        assertNoClusterManager(isolatedNode, TimeValue.timeValueSeconds(40));
 
         // restore isolation
         networkDisruption.stopDisrupting();

@@ -227,7 +227,7 @@ public class ClusterManagerDisruptionIT extends AbstractDisruptionTestCase {
         // continuously ping until network failures have been resolved. However
         // It may a take a bit before the node detects it has been cut off from the elected cluster-manager
         logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode);
-        assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30));
+        assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30));
 
         logger.info("wait until elected cluster-manager has been removed and a new 2 node cluster was from (via [{}])", isolatedNode);
         ensureStableCluster(2, nonIsolatedNode);

@@ -273,7 +273,7 @@ public class ClusterManagerDisruptionIT extends AbstractDisruptionTestCase {
         // continuously ping until network failures have been resolved. However
         // It may a take a bit before the node detects it has been cut off from the elected cluster-manager
         logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode);
-        assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30));
+        assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30));
 
         // make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node
         // the unresponsive partition causes recoveries to only time out after 15m (default) and these will cause
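The `assertNoClusterManager(...)` assertions introduced above are the renamed no-master checks; conceptually each one polls a node's local cluster state until no elected cluster-manager is visible. A simplified sketch of that check (helper name and body are illustrative, assuming the internal-cluster integration-test scope; not the actual helper implementation):

```java
// Simplified, illustrative form of an assertNoClusterManager-style check:
// poll the node's *local* cluster state until it reports no elected
// cluster-manager (or assertBusy's timeout elapses).
private void assertNoClusterManagerSketch(String node) throws Exception {
    assertBusy(() -> {
        ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState();
        assertNull("node [" + node + "] still sees an elected cluster-manager", state.nodes().getMasterNode());
    });
}
```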
@@ -81,7 +81,7 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
         );
         nonClusterManagerTransportService.addFailToSendNoConnectRule(clusterManagerTranspotService);
 
-        assertNoMaster(nonClusterManagerNode);
+        assertNoClusterManager(nonClusterManagerNode);
 
         logger.info(
             "blocking cluster state publishing from cluster-manager [{}] to non cluster-manager [{}]",

@@ -166,7 +166,7 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
         logger.info("--> forcing a complete election to make sure \"preferred\" cluster-manager is elected");
         isolateAllNodes.startDisrupting();
         for (String node : nodes) {
-            assertNoMaster(node);
+            assertNoClusterManager(node);
         }
         internalCluster().clearDisruptionScheme();
         ensureStableCluster(3);

@@ -194,7 +194,7 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
         logger.info("--> forcing a complete election again");
         isolateAllNodes.startDisrupting();
         for (String node : nodes) {
-            assertNoMaster(node);
+            assertNoClusterManager(node);
         }
 
         isolateAllNodes.stopDisrupting();

@@ -242,7 +242,7 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
         ensureStableCluster(2, clusterManagerNode);
 
         logger.info("waiting for [{}] to have no cluster-manager", nonClusterManagerNode);
-        assertNoMaster(nonClusterManagerNode);
+        assertNoClusterManager(nonClusterManagerNode);
 
         logger.info("healing partition and checking cluster reforms");
         clusterManagerTransportService.clearAllRules();
@@ -252,7 +252,7 @@ public class TransportAddVotingConfigExclusionsActionTests extends OpenSearchTes
         assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
     }
 
-    public void testWithdrawsVotesFromAllMasterEligibleNodes() throws InterruptedException {
+    public void testWithdrawsVotesFromAllClusterManagerEligibleNodes() throws InterruptedException {
         final CountDownLatch countDownLatch = new CountDownLatch(2);
 
         clusterStateObserver.waitForNextChange(new AdjustConfigurationForExclusions(countDownLatch));

@@ -349,14 +349,14 @@ public class TransportAddVotingConfigExclusionsActionTests extends OpenSearchTes
         assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
     }
 
-    public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException {
+    public void testOnlyMatchesClusterManagerEligibleNodes() throws InterruptedException {
         final CountDownLatch countDownLatch = new CountDownLatch(1);
         final SetOnce<TransportException> exceptionHolder = new SetOnce<>();
 
         transportService.sendRequest(
             localNode,
             AddVotingConfigExclusionsAction.NAME,
-            makeRequestWithNodeDescriptions("_all", "master:false"),
+            makeRequestWithNodeDescriptions("_all", "cluster_manager:false"),
             expectError(e -> {
                 exceptionHolder.set(e);
                 countDownLatch.countDown();

@@ -368,7 +368,7 @@ public class TransportAddVotingConfigExclusionsActionTests extends OpenSearchTes
         assertThat(rootCause, instanceOf(IllegalArgumentException.class));
         assertThat(
             rootCause.getMessage(),
-            equalTo("add voting config exclusions request for [_all, master:false] matched no cluster-manager-eligible nodes")
+            equalTo("add voting config exclusions request for [_all, cluster_manager:false] matched no cluster-manager-eligible nodes")
         );
         assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
     }
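The renamed test keeps the deprecated node-description syntax working with the new role name: `cluster_manager:false` behaves like the old `master:false`, and a description set that matches no cluster-manager-eligible node is rejected. Roughly (the description-based request form below is the deprecated one and is shown only for illustration):

```java
// Deprecated description-based form (illustrative; using node descriptions
// emits AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE as a warning):
AddVotingConfigExclusionsRequest request = new AddVotingConfigExclusionsRequest("_all", "cluster_manager:false");
// Resolving it against a state where those descriptions match no
// cluster-manager-eligible node fails with the IllegalArgumentException
// message asserted above.
```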
@@ -110,9 +110,9 @@ public class ClusterHealthResponsesTests extends AbstractSerializingTestCase<Clu
         assertThat(clusterHealth.getActiveShardsPercent(), is(allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0))));
     }
 
-    public void testClusterHealthVerifyMasterNodeDiscovery() throws IOException {
+    public void testClusterHealthVerifyClusterManagerNodeDiscovery() throws IOException {
         DiscoveryNode localNode = new DiscoveryNode("node", OpenSearchTestCase.buildNewFakeTransportAddress(), Version.CURRENT);
-        // set the node information to verify master_node discovery in ClusterHealthResponse
+        // set the node information to verify cluster_manager_node discovery in ClusterHealthResponse
         ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
             .build();
@@ -474,12 +474,12 @@ public class CancellableTasksTests extends TaskManagerTestCase {
         for (int i = 1; i < testNodes.length; i++) {
             discoveryNodes[i - 1] = testNodes[i].discoveryNode();
         }
-        DiscoveryNode master = discoveryNodes[0];
+        DiscoveryNode clusterManager = discoveryNodes[0];
         for (int i = 1; i < testNodes.length; i++) {
             // Notify only nodes that should remain in the cluster
             setState(
                 testNodes[i].clusterService,
-                ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), master, discoveryNodes)
+                ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), clusterManager, discoveryNodes)
             );
         }
         if (randomBoolean()) {
@@ -257,9 +257,9 @@ public abstract class TaskManagerTestCase extends OpenSearchTestCase {
         for (int i = 0; i < nodes.length; i++) {
             discoveryNodes[i] = nodes[i].discoveryNode();
         }
-        DiscoveryNode master = discoveryNodes[0];
+        DiscoveryNode clusterManager = discoveryNodes[0];
         for (TestNode node : nodes) {
-            setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), master, discoveryNodes));
+            setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), clusterManager, discoveryNodes));
         }
         for (TestNode nodeA : nodes) {
             for (TestNode nodeB : nodes) {
@@ -161,7 +161,7 @@ public class ClusterRerouteRequestTests extends OpenSearchTestCase {
         assertEquals(request, copy);
         assertEquals(request.hashCode(), copy.hashCode());
 
-        // Changing masterNodeTime makes requests not equal
+        // Changing clusterManagerNodeTimeout makes requests not equal
        copy.masterNodeTimeout(timeValueMillis(request.masterNodeTimeout().millis() + 1));
         assertNotEquals(request, copy);
         assertNotEquals(request.hashCode(), copy.hashCode());
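The corrected comment refers to the request's cluster-manager node timeout, which participates in `equals`/`hashCode`; bumping it by a single millisecond is enough to break equality. A minimal sketch (the request construction is elided in the hunk, so the setup here is illustrative):

```java
// Minimal sketch: two otherwise-identical reroute requests stop being equal
// once the cluster-manager node timeout differs by even one millisecond.
ClusterRerouteRequest request = new ClusterRerouteRequest();
ClusterRerouteRequest copy = new ClusterRerouteRequest();
assertEquals(request, copy);

copy.masterNodeTimeout(timeValueMillis(request.masterNodeTimeout().millis() + 1));
assertNotEquals(request, copy);
assertNotEquals(request.hashCode(), copy.hashCode());
```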
@@ -260,7 +260,7 @@ public class TransportMultiSearchActionTests extends OpenSearchTestCase {
             }
             builder.add(
                 new DiscoveryNode(
-                    "master",
+                    "cluster_manager",
                     buildNewFakeTransportAddress(),
                     Collections.emptyMap(),
                     Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
@@ -366,17 +366,17 @@ public class TransportBroadcastByNodeActionTests extends OpenSearchTestCase {
         }
     }
 
-    // simulate the master being removed from the cluster but before a new master is elected
-    // as such, the shards assigned to the master will still show up in the cluster state as assigned to a node but
-    // that node will not be in the local cluster state on any node that has detected the master as failing
+    // simulate the cluster-manager being removed from the cluster but before a new cluster-manager is elected
+    // as such, the shards assigned to the cluster-manager will still show up in the cluster state as assigned to a node but
+    // that node will not be in the local cluster state on any node that has detected the cluster-manager as failing
     // in this case, such a shard should be treated as unassigned
-    public void testRequestsAreNotSentToFailedMaster() {
+    public void testRequestsAreNotSentToFailedClusterManager() {
         Request request = new Request(new String[] { TEST_INDEX });
         PlainActionFuture<Response> listener = new PlainActionFuture<>();
 
-        DiscoveryNode masterNode = clusterService.state().nodes().getMasterNode();
+        DiscoveryNode clusterManagerNode = clusterService.state().nodes().getMasterNode();
         DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
-        builder.remove(masterNode.getId());
+        builder.remove(clusterManagerNode.getId());
 
         setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));
 

@@ -384,11 +384,11 @@ public class TransportBroadcastByNodeActionTests extends OpenSearchTestCase {
 
         Map<String, List<CapturingTransport.CapturedRequest>> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear();
 
-        // the master should not be in the list of nodes that requests were sent to
+        // the cluster manager should not be in the list of nodes that requests were sent to
         ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[] { TEST_INDEX });
         Set<String> set = new HashSet<>();
         for (ShardRouting shard : shardIt) {
-            if (!shard.currentNodeId().equals(masterNode.getId())) {
+            if (!shard.currentNodeId().equals(clusterManagerNode.getId())) {
                 set.add(shard.currentNodeId());
             }
         }

@@ -399,7 +399,7 @@ public class TransportBroadcastByNodeActionTests extends OpenSearchTestCase {
         // check requests were sent to the right nodes
         assertEquals(set, capturedRequests.keySet());
         for (Map.Entry<String, List<CapturingTransport.CapturedRequest>> entry : capturedRequests.entrySet()) {
-            // check one request was sent to each non-master node
+            // check one request was sent to each non-cluster-manager node
             assertEquals(1, entry.getValue().size());
         }
     }

@@ -456,13 +456,13 @@ public class TransportBroadcastByNodeActionTests extends OpenSearchTestCase {
         Request request = new Request(new String[] { TEST_INDEX });
         PlainActionFuture<Response> listener = new PlainActionFuture<>();
 
-        // simulate removing the master
-        final boolean simulateFailedMasterNode = rarely();
-        DiscoveryNode failedMasterNode = null;
-        if (simulateFailedMasterNode) {
-            failedMasterNode = clusterService.state().nodes().getMasterNode();
+        // simulate removing the cluster-manager
+        final boolean simulateFailedClusterManagerNode = rarely();
+        DiscoveryNode failedClusterManagerNode = null;
+        if (simulateFailedClusterManagerNode) {
+            failedClusterManagerNode = clusterService.state().nodes().getMasterNode();
             DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
-            builder.remove(failedMasterNode.getId());
+            builder.remove(failedClusterManagerNode.getId());
             builder.masterNodeId(null);
 
             setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));

@@ -511,8 +511,8 @@ public class TransportBroadcastByNodeActionTests extends OpenSearchTestCase {
                 transport.handleResponse(requestId, nodeResponse);
             }
         }
-        if (simulateFailedMasterNode) {
-            totalShards += map.get(failedMasterNode.getId()).size();
+        if (simulateFailedClusterManagerNode) {
+            totalShards += map.get(failedClusterManagerNode.getId()).size();
         }
 
         Response response = listener.get();
@@ -240,7 +240,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
     }
 
     public void testLocalOperationWithoutBlocks() throws ExecutionException, InterruptedException {
-        final boolean masterOperationFailure = randomBoolean();
+        final boolean clusterManagerOperationFailure = randomBoolean();
 
         Request request = new Request();
         PlainActionFuture<Response> listener = new PlainActionFuture<>();

@@ -253,7 +253,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         new Action("internal:testAction", transportService, clusterService, threadPool) {
             @Override
             protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) {
-                if (masterOperationFailure) {
+                if (clusterManagerOperationFailure) {
                     listener.onFailure(exception);
                 } else {
                     listener.onResponse(response);

@@ -262,7 +262,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         }.execute(request, listener);
         assertTrue(listener.isDone());
 
-        if (masterOperationFailure) {
+        if (clusterManagerOperationFailure) {
             try {
                 listener.get();
                 fail("Expected exception but returned proper result");

@@ -376,7 +376,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         listener.get();
     }
 
-    public void testMasterNotAvailable() throws ExecutionException, InterruptedException {
+    public void testClusterManagerNotAvailable() throws ExecutionException, InterruptedException {
         Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0));
         setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
         PlainActionFuture<Response> listener = new PlainActionFuture<>();

@@ -385,7 +385,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class);
     }
 
-    public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException {
+    public void testClusterManagerBecomesAvailable() throws ExecutionException, InterruptedException {
         Request request = new Request();
         setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
         PlainActionFuture<Response> listener = new PlainActionFuture<>();

@@ -396,7 +396,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         listener.get();
     }
 
-    public void testDelegateToMaster() throws ExecutionException, InterruptedException {
+    public void testDelegateToClusterManager() throws ExecutionException, InterruptedException {
         Request request = new Request();
         setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
 

@@ -415,15 +415,15 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         assertThat(listener.get(), equalTo(response));
     }
 
-    public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException {
+    public void testDelegateToFailingClusterManager() throws ExecutionException, InterruptedException {
         boolean failsWithConnectTransportException = randomBoolean();
-        boolean rejoinSameMaster = failsWithConnectTransportException && randomBoolean();
+        boolean rejoinSameClusterManager = failsWithConnectTransportException && randomBoolean();
         Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0));
-        DiscoveryNode masterNode = this.remoteNode;
+        DiscoveryNode clusterManagerNode = this.remoteNode;
         setState(
             clusterService,
             // use a random base version so it can go down when simulating a restart.
-            ClusterState.builder(ClusterStateCreationUtils.state(localNode, masterNode, allNodes)).version(randomIntBetween(0, 10))
+            ClusterState.builder(ClusterStateCreationUtils.state(localNode, clusterManagerNode, allNodes)).version(randomIntBetween(0, 10))
         );
 
         PlainActionFuture<Response> listener = new PlainActionFuture<>();

@@ -436,14 +436,16 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         assertThat(capturedRequest.request, equalTo(request));
         assertThat(capturedRequest.action, equalTo("internal:testAction"));
 
-        if (rejoinSameMaster) {
+        if (rejoinSameClusterManager) {
             transport.handleRemoteError(
                 capturedRequest.requestId,
-                randomBoolean() ? new ConnectTransportException(masterNode, "Fake error") : new NodeClosedException(masterNode)
+                randomBoolean()
+                    ? new ConnectTransportException(clusterManagerNode, "Fake error")
+                    : new NodeClosedException(clusterManagerNode)
             );
             assertFalse(listener.isDone());
             if (randomBoolean()) {
-                // simulate master node removal
+                // simulate cluster-manager node removal
                 final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
                 nodesBuilder.masterNodeId(null);
                 setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder));

@@ -452,15 +454,19 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
                 // reset the same state to increment a version simulating a join of an existing node
                 // simulating use being disconnected
                 final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
-                nodesBuilder.masterNodeId(masterNode.getId());
+                nodesBuilder.masterNodeId(clusterManagerNode.getId());
                 setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder));
             } else {
-                // simulate master restart followed by a state recovery - this will reset the cluster state version
+                // simulate cluster-manager restart followed by a state recovery - this will reset the cluster state version
                 final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
-                nodesBuilder.remove(masterNode);
-                masterNode = new DiscoveryNode(masterNode.getId(), masterNode.getAddress(), masterNode.getVersion());
-                nodesBuilder.add(masterNode);
-                nodesBuilder.masterNodeId(masterNode.getId());
+                nodesBuilder.remove(clusterManagerNode);
+                clusterManagerNode = new DiscoveryNode(
+                    clusterManagerNode.getId(),
+                    clusterManagerNode.getAddress(),
+                    clusterManagerNode.getVersion()
+                );
+                nodesBuilder.add(clusterManagerNode);
+                nodesBuilder.masterNodeId(clusterManagerNode.getId());
                 final ClusterState.Builder builder = ClusterState.builder(clusterService.state()).nodes(nodesBuilder);
                 setState(clusterService, builder.version(0));
             }

@@ -472,7 +478,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
             assertThat(capturedRequest.request, equalTo(request));
             assertThat(capturedRequest.action, equalTo("internal:testAction"));
         } else if (failsWithConnectTransportException) {
-            transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(masterNode, "Fake error"));
+            transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(clusterManagerNode, "Fake error"));
             assertFalse(listener.isDone());
             setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
             assertTrue(listener.isDone());

@@ -495,7 +501,7 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         }
     }
 
-    public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException {
+    public void testClusterManagerFailoverAfterStepDown() throws ExecutionException, InterruptedException {
         Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1));
         PlainActionFuture<Response> listener = new PlainActionFuture<>();
 

@@ -506,7 +512,8 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         new Action("internal:testAction", transportService, clusterService, threadPool) {
             @Override
             protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
-                // The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery
+                // The other node has become cluster-manager, simulate failures of this node while publishing cluster state through
+                // ZenDiscovery
                 setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
                 Exception failure = randomBoolean()
                     ? new FailedToCommitClusterStateException("Fake error")

@@ -526,8 +533,8 @@ public class TransportMasterNodeActionTests extends OpenSearchTestCase {
         assertThat(listener.get(), equalTo(response));
     }
 
-    // Validate TransportMasterNodeAction.testDelegateToMaster() works correctly on node with the deprecated MASTER_ROLE.
-    public void testDelegateToMasterOnNodeWithDeprecatedMasterRole() throws ExecutionException, InterruptedException {
+    // Validate TransportMasterNodeAction.testDelegateToClusterManager() works correctly on node with the deprecated MASTER_ROLE.
+    public void testDelegateToClusterManagerOnNodeWithDeprecatedMasterRole() throws ExecutionException, InterruptedException {
         DiscoveryNode localNode = new DiscoveryNode(
             "local_node",
             buildNewFakeTransportAddress(),
@@ -42,13 +42,13 @@ public class TransportMasterNodeActionUtils {
     * Allows to directly call {@link TransportMasterNodeAction#masterOperation(MasterNodeRequest, ClusterState, ActionListener)} which is
     * a protected method.
     */
-    public static <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> void runMasterOperation(
-        TransportMasterNodeAction<Request, Response> masterNodeAction,
+    public static <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> void runClusterManagerOperation(
+        TransportMasterNodeAction<Request, Response> clusterManagerNodeAction,
         Request request,
         ClusterState clusterState,
         ActionListener<Response> actionListener
     ) throws Exception {
-        assert masterNodeAction.checkBlock(request, clusterState) == null;
-        masterNodeAction.masterOperation(request, clusterState, actionListener);
+        assert clusterManagerNodeAction.checkBlock(request, clusterState) == null;
+        clusterManagerNodeAction.masterOperation(request, clusterState, actionListener);
     }
 }
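A call site for the renamed helper might look like the following (the action, request, and listener are placeholders standing in for whatever a test already has; only `runClusterManagerOperation` itself comes from the class above):

```java
// Hypothetical call site for the renamed helper; `action` and `request` are
// placeholders for a concrete TransportMasterNodeAction and its request type.
TransportMasterNodeActionUtils.runClusterManagerOperation(
    action,
    request,
    clusterService.state(),
    ActionListener.wrap(
        response -> logger.info("cluster-manager operation completed"),
        e -> logger.error("cluster-manager operation failed", e)
    )
);
```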
@@ -107,19 +107,19 @@ public class ClusterChangedEventTests extends OpenSearchTestCase {
     }
 
     /**
-     * Test whether the ClusterChangedEvent returns the correct value for whether the local node is master,
+     * Test whether the ClusterChangedEvent returns the correct value for whether the local node is cluster-manager,
      * based on what was set on the cluster state.
      */
-    public void testLocalNodeIsMaster() {
+    public void testLocalNodeIsClusterManager() {
         final int numNodesInCluster = 3;
         ClusterState previousState = createSimpleClusterState();
         ClusterState newState = createState(numNodesInCluster, true, initialIndices);
         ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState);
-        assertTrue("local node should be master", event.localNodeMaster());
+        assertTrue("local node should be cluster-manager", event.localNodeMaster());
 
         newState = createState(numNodesInCluster, false, initialIndices);
         event = new ClusterChangedEvent("_na_", newState, previousState);
-        assertFalse("local node should not be master", event.localNodeMaster());
+        assertFalse("local node should not be cluster-manager", event.localNodeMaster());
     }
 
     /**

@@ -314,8 +314,8 @@ public class ClusterChangedEventTests extends OpenSearchTestCase {
         assertTrue(changedCustomMetadataTypeSet.contains(customMetadata1.getWriteableName()));
     }
 
-    // Validate the above test case testLocalNodeIsMaster() passes when the deprecated 'master' role is assigned to the local node.
-    public void testLocalNodeIsMasterWithDeprecatedMasterRole() {
+    // Validate the above test case testLocalNodeIsClusterManager() passes when the deprecated 'master' role is assigned to the local node.
+    public void testLocalNodeIsClusterManagerWithDeprecatedMasterRole() {
         final DiscoveryNodes.Builder builderLocalIsMaster = DiscoveryNodes.builder();
         final DiscoveryNode node0 = newNode("node_0", Set.of(DiscoveryNodeRole.MASTER_ROLE));
         final DiscoveryNode node1 = newNode("node_1", Set.of(DiscoveryNodeRole.DATA_ROLE));

@@ -390,18 +390,18 @@ public class ClusterChangedEventTests extends OpenSearchTestCase {
     }
 
     // Create a basic cluster state with a given set of indices
-    private static ClusterState createState(final int numNodes, final boolean isLocalMaster, final List<Index> indices) {
+    private static ClusterState createState(final int numNodes, final boolean isLocalClusterManager, final List<Index> indices) {
         final Metadata metadata = createMetadata(indices);
         return ClusterState.builder(TEST_CLUSTER_NAME)
-            .nodes(createDiscoveryNodes(numNodes, isLocalMaster))
+            .nodes(createDiscoveryNodes(numNodes, isLocalClusterManager))
             .metadata(metadata)
             .routingTable(createRoutingTable(1, metadata))
             .build();
     }
 
     // Create a non-initialized cluster state
-    private static ClusterState createNonInitializedState(final int numNodes, final boolean isLocalMaster) {
-        final ClusterState withoutBlock = createState(numNodes, isLocalMaster, Collections.emptyList());
+    private static ClusterState createNonInitializedState(final int numNodes, final boolean isLocalClusterManager) {
+        final ClusterState withoutBlock = createState(numNodes, isLocalClusterManager, Collections.emptyList());
         return ClusterState.builder(withoutBlock)
             .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK).build())
             .build();

@@ -463,28 +463,29 @@ public class ClusterChangedEventTests extends OpenSearchTestCase {
     }
 
     // Create the discovery nodes for a cluster state. For our testing purposes, we want
-    // the first to be master, the second to be master eligible, the third to be a data node,
-    // and the remainder can be any kinds of nodes (master eligible, data, or both).
-    private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalMaster) {
+    // the first to be cluster-manager, the second to be cluster-manager eligible, the third to be a data node,
+    // and the remainder can be any kinds of nodes (cluster-manager eligible, data, or both).
+    private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalClusterManager) {
         assert (numNodes >= 3) : "the initial cluster state for event change tests should have a minimum of 3 nodes "
-            + "so there are a minimum of 2 master nodes for testing master change events.";
+            + "so there are a minimum of 2 cluster-manager nodes for testing cluster-manager change events.";
         final DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
-        final int localNodeIndex = isLocalMaster ? 0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not master
+        final int localNodeIndex = isLocalClusterManager ? 0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not
+        // cluster-manager
         for (int i = 0; i < numNodes; i++) {
             final String nodeId = NODE_ID_PREFIX + i;
             Set<DiscoveryNodeRole> roles = new HashSet<>();
             if (i == 0) {
-                // the master node
+                // the cluster-manager node
                 builder.masterNodeId(nodeId);
                 roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
             } else if (i == 1) {
-                // the alternate master node
+                // the alternate cluster-manager node
                 roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
             } else if (i == 2) {
                 // we need at least one data node
                 roles.add(DiscoveryNodeRole.DATA_ROLE);
             } else {
-                // remaining nodes can be anything (except for master)
+                // remaining nodes can be anything (except for cluster-manager)
                 if (randomBoolean()) {
                     roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
                 }
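Note that the renamed tests still call the unrenamed `localNodeMaster()` accessor (production names are untouched here, per the backwards-compatibility note in the commit message). What it reports reduces to a single check against the new state's discovery nodes, roughly:

```java
// Roughly what event.localNodeMaster() reports (illustrative restatement,
// not the production implementation):
boolean localNodeIsClusterManager = newState.nodes().isLocalNodeElectedMaster();
```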
@@ -86,31 +86,34 @@ public class ClusterStateTests extends OpenSearchTestCase {
         final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version);
         final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build();
         ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
-        ClusterState noMaster1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();
-        ClusterState noMaster2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();
-        ClusterState withMaster1a = ClusterState.builder(name)
+        ClusterState noClusterManager1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();
+        ClusterState noClusterManager2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();
+        ClusterState withClusterManager1a = ClusterState.builder(name)
             .version(randomInt(5))
             .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId()))
             .build();
-        ClusterState withMaster1b = ClusterState.builder(name)
+        ClusterState withClusterManager1b = ClusterState.builder(name)
             .version(randomInt(5))
             .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId()))
             .build();
-        ClusterState withMaster2 = ClusterState.builder(name)
+        ClusterState withClusterManager2 = ClusterState.builder(name)
             .version(randomInt(5))
             .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.getId()))
             .build();
 
         // states with no cluster-manager should never supersede anything
-        assertFalse(noMaster1.supersedes(noMaster2));
-        assertFalse(noMaster1.supersedes(withMaster1a));
+        assertFalse(noClusterManager1.supersedes(noClusterManager2));
+        assertFalse(noClusterManager1.supersedes(withClusterManager1a));
 
-        // states should never supersede states from another master
-        assertFalse(withMaster1a.supersedes(withMaster2));
-        assertFalse(withMaster1a.supersedes(noMaster1));
+        // states should never supersede states from another cluster-manager
+        assertFalse(withClusterManager1a.supersedes(withClusterManager2));
+        assertFalse(withClusterManager1a.supersedes(noClusterManager1));
 
-        // state from the same master compare by version
-        assertThat(withMaster1a.supersedes(withMaster1b), equalTo(withMaster1a.version() > withMaster1b.version()));
+        // state from the same cluster-manager compare by version
+        assertThat(
+            withClusterManager1a.supersedes(withClusterManager1b),
+            equalTo(withClusterManager1a.version() > withClusterManager1b.version())
+        );
     }
 
     public void testBuilderRejectsNullCustom() {

@@ -146,8 +149,8 @@ public class ClusterStateTests extends OpenSearchTestCase {
             + " \"cluster_uuid\" : \"clusterUUID\",\n"
             + " \"version\" : 0,\n"
             + " \"state_uuid\" : \"stateUUID\",\n"
-            + " \"master_node\" : \"masterNodeId\",\n"
-            + " \"cluster_manager_node\" : \"masterNodeId\",\n"
+            + " \"master_node\" : \"clusterManagerNodeId\",\n"
+            + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n"
             + " \"blocks\" : {\n"
             + " \"global\" : {\n"
             + " \"1\" : {\n"

@@ -352,8 +355,8 @@ public class ClusterStateTests extends OpenSearchTestCase {
             + " \"cluster_uuid\" : \"clusterUUID\",\n"
             + " \"version\" : 0,\n"
             + " \"state_uuid\" : \"stateUUID\",\n"
-            + " \"master_node\" : \"masterNodeId\",\n"
-            + " \"cluster_manager_node\" : \"masterNodeId\",\n"
+            + " \"master_node\" : \"clusterManagerNodeId\",\n"
+            + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n"
            + " \"blocks\" : {\n"
            + " \"global\" : {\n"
            + " \"1\" : {\n"

@@ -551,8 +554,8 @@ public class ClusterStateTests extends OpenSearchTestCase {
             + " \"cluster_uuid\" : \"clusterUUID\",\n"
             + " \"version\" : 0,\n"
             + " \"state_uuid\" : \"stateUUID\",\n"
-            + " \"master_node\" : \"masterNodeId\",\n"
-            + " \"cluster_manager_node\" : \"masterNodeId\",\n"
+            + " \"master_node\" : \"clusterManagerNodeId\",\n"
+            + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n"
            + " \"blocks\" : {\n"
            + " \"global\" : {\n"
            + " \"1\" : {\n"

@@ -868,7 +871,7 @@ public class ClusterStateTests extends OpenSearchTestCase {
             .stateUUID("stateUUID")
             .nodes(
                 DiscoveryNodes.builder()
-                    .masterNodeId("masterNodeId")
+                    .masterNodeId("clusterManagerNodeId")
                     .add(new DiscoveryNode("nodeId1", new TransportAddress(InetAddress.getByName("127.0.0.1"), 111), Version.CURRENT))
                     .build()
             )
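The `supersedes` assertions above pin down a simple rule; restated for reference (an illustrative restatement of what the assertions check, not the production source):

```java
// A state supersedes another only when both name the same non-null
// cluster-manager node and its version is strictly higher.
static boolean supersedesSketch(ClusterState a, ClusterState b) {
    String clusterManagerId = a.nodes().getMasterNodeId();
    return clusterManagerId != null
        && clusterManagerId.equals(b.nodes().getMasterNodeId())
        && a.version() > b.version();
}
```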
@@ -68,8 +68,8 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas
 
     public void testScheduling() {
         final DiscoveryNode discoveryNode = new DiscoveryNode("test", buildNewFakeTransportAddress(), Version.CURRENT);
-        final DiscoveryNodes noMaster = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build();
-        final DiscoveryNodes localMaster = DiscoveryNodes.builder(noMaster).masterNodeId(discoveryNode.getId()).build();
+        final DiscoveryNodes noClusterManager = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build();
+        final DiscoveryNodes localClusterManager = DiscoveryNodes.builder(noClusterManager).masterNodeId(discoveryNode.getId()).build();
 
         final Settings.Builder settingsBuilder = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), discoveryNode.getName());
         if (randomBoolean()) {

@@ -87,14 +87,14 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas
             }
         };
 
-        final MasterService masterService = new FakeThreadPoolMasterService(
+        final MasterService clusterManagerService = new FakeThreadPoolMasterService(
             "test",
-            "masterService",
+            "clusterManagerService",
             threadPool,
-            r -> { fail("master service should not run any tasks"); }
+            r -> { fail("cluster-manager service should not run any tasks"); }
         );
 
-        final ClusterService clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService);
+        final ClusterService clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService);
 
         final FakeClusterInfoServiceClient client = new FakeClusterInfoServiceClient(threadPool);
         final InternalClusterInfoService clusterInfoService = new InternalClusterInfoService(settings, clusterService, threadPool, client);

@@ -102,34 +102,34 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas
         clusterInfoService.addListener(ignored -> {});
 
         clusterService.setNodeConnectionsService(ClusterServiceUtils.createNoOpNodeConnectionsService());
-        clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build());
-        masterService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish"));
-        masterService.setClusterStateSupplier(clusterApplierService::state);
+        clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build());
+        clusterManagerService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish"));
+        clusterManagerService.setClusterStateSupplier(clusterApplierService::state);
         clusterService.start();
 
-        final AtomicBoolean becameMaster1 = new AtomicBoolean();
+        final AtomicBoolean becameClusterManager1 = new AtomicBoolean();
         clusterApplierService.onNewClusterState(
-            "become master 1",
-            () -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(),
-            setFlagOnSuccess(becameMaster1)
+            "become cluster-manager 1",
+            () -> ClusterState.builder(new ClusterName("cluster")).nodes(localClusterManager).build(),
+            setFlagOnSuccess(becameClusterManager1)
         );
-        runUntilFlag(deterministicTaskQueue, becameMaster1);
+        runUntilFlag(deterministicTaskQueue, becameClusterManager1);
 
-        final AtomicBoolean failMaster1 = new AtomicBoolean();
+        final AtomicBoolean failClusterManager1 = new AtomicBoolean();
         clusterApplierService.onNewClusterState(
-            "fail master 1",
-            () -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(),
-            setFlagOnSuccess(failMaster1)
+            "fail cluster-manager 1",
+            () -> ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build(),
+            setFlagOnSuccess(failClusterManager1)
        );
-        runUntilFlag(deterministicTaskQueue, failMaster1);
+        runUntilFlag(deterministicTaskQueue, failClusterManager1);
 
-        final AtomicBoolean becameMaster2 = new AtomicBoolean();
+        final AtomicBoolean becameClusterManager2 = new AtomicBoolean();
         clusterApplierService.onNewClusterState(
-            "become master 2",
-            () -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(),
-            setFlagOnSuccess(becameMaster2)
+            "become cluster-manager 2",
+            () -> ClusterState.builder(new ClusterName("cluster")).nodes(localClusterManager).build(),
+            setFlagOnSuccess(becameClusterManager2)
        );
-        runUntilFlag(deterministicTaskQueue, becameMaster2);
+        runUntilFlag(deterministicTaskQueue, becameClusterManager2);
 
         for (int i = 0; i < 3; i++) {
             final int initialRequestCount = client.requestCount;

@@ -139,13 +139,13 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas
             assertThat(client.requestCount, equalTo(initialRequestCount + 2)); // should have run two client requests per interval
         }
 
-        final AtomicBoolean failMaster2 = new AtomicBoolean();
+        final AtomicBoolean failClusterManager2 = new AtomicBoolean();
         clusterApplierService.onNewClusterState(
-            "fail master 2",
-            () -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(),
-            setFlagOnSuccess(failMaster2)
+            "fail cluster-manager 2",
+            () -> ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build(),
+            setFlagOnSuccess(failClusterManager2)
         );
-        runUntilFlag(deterministicTaskQueue, failMaster2);
+        runUntilFlag(deterministicTaskQueue, failClusterManager2);
 
         runFor(deterministicTaskQueue, INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings).millis());
         deterministicTaskQueue.runAllRunnableTasks();
@@ -112,16 +112,16 @@ public class ShardStateActionTests extends OpenSearchTestCase {
             super(clusterService, transportService, allocationService, rerouteService, THREAD_POOL);
         }
 
-        private Runnable onBeforeWaitForNewMasterAndRetry;
+        private Runnable onBeforeWaitForNewClusterManagerAndRetry;
 
-        public void setOnBeforeWaitForNewMasterAndRetry(Runnable onBeforeWaitForNewMasterAndRetry) {
-            this.onBeforeWaitForNewMasterAndRetry = onBeforeWaitForNewMasterAndRetry;
+        public void setOnBeforeWaitForNewClusterManagerAndRetry(Runnable onBeforeWaitForNewClusterManagerAndRetry) {
+            this.onBeforeWaitForNewClusterManagerAndRetry = onBeforeWaitForNewClusterManagerAndRetry;
         }
 
-        private Runnable onAfterWaitForNewMasterAndRetry;
+        private Runnable onAfterWaitForNewClusterManagerAndRetry;
 
-        public void setOnAfterWaitForNewMasterAndRetry(Runnable onAfterWaitForNewMasterAndRetry) {
-            this.onAfterWaitForNewMasterAndRetry = onAfterWaitForNewMasterAndRetry;
+        public void setOnAfterWaitFornewClusterManagerAndRetry(Runnable onAfterWaitFornewClusterManagerAndRetry) {
+            this.onAfterWaitForNewClusterManagerAndRetry = onAfterWaitFornewClusterManagerAndRetry;
         }
 
         @Override

@@ -132,9 +132,9 @@ public class ShardStateActionTests extends OpenSearchTestCase {
             ActionListener<Void> listener,
             Predicate<ClusterState> changePredicate
         ) {
-            onBeforeWaitForNewMasterAndRetry.run();
+            onBeforeWaitForNewClusterManagerAndRetry.run();
             super.waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate);
-            onAfterWaitForNewMasterAndRetry.run();
+            onAfterWaitForNewClusterManagerAndRetry.run();
         }
     }
 

@@ -160,8 +160,8 @@ public class ShardStateActionTests extends OpenSearchTestCase {
         transportService.start();
         transportService.acceptIncomingRequests();
         shardStateAction = new TestShardStateAction(clusterService, transportService, null, null);
-        shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {});
-        shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> {});
+        shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> {});
+        shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> {});
     }
 
     @Override

@@ -196,7 +196,7 @@ public class ShardStateActionTests extends OpenSearchTestCase {
             // for the right shard
             assertEquals(shardEntry.shardId, shardRouting.shardId());
             assertEquals(shardEntry.allocationId, shardRouting.allocationId().getId());
-            // sent to the master
+            // sent to the cluster-manager
             assertEquals(clusterService.state().nodes().getMasterNode().getId(), capturedRequests[0].node.getId());
 
             transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE);

@@ -205,20 +205,20 @@ public class ShardStateActionTests extends OpenSearchTestCase {
         assertNull(listener.failure.get());
     }
 
-    public void testNoMaster() throws InterruptedException {
+    public void testNoClusterManager() throws InterruptedException {
         final String index = "test";
 
         setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
 
-        DiscoveryNodes.Builder noMasterBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
-        noMasterBuilder.masterNodeId(null);
-        setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noMasterBuilder));
+        DiscoveryNodes.Builder noClusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
+        noClusterManagerBuilder.masterNodeId(null);
+        setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noClusterManagerBuilder));
 
         CountDownLatch latch = new CountDownLatch(1);
         AtomicInteger retries = new AtomicInteger();
         AtomicBoolean success = new AtomicBoolean();
 
-        setUpMasterRetryVerification(1, retries, latch, requestId -> {});
+        setUpClusterManagerRetryVerification(1, retries, latch, requestId -> {});
 
         ShardRouting failedShard = getRandomShardRouting(index);
         shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ActionListener<Void>() {

@@ -242,7 +242,7 @@ public class ShardStateActionTests extends OpenSearchTestCase {
         assertTrue(success.get());
     }
 
-    public void testMasterChannelException() throws InterruptedException {
+    public void testClusterManagerChannelException() throws InterruptedException {
         final String index = "test";
 
         setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));

@@ -268,7 +268,7 @@ public class ShardStateActionTests extends OpenSearchTestCase {
         };
 
         final int numberOfRetries = randomIntBetween(1, 256);
-        setUpMasterRetryVerification(numberOfRetries, retries, latch, retryLoop);
+        setUpClusterManagerRetryVerification(numberOfRetries, retries, latch, retryLoop);
 
         ShardRouting failedShard = getRandomShardRouting(index);
         shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ActionListener<Void>() {

@@ -413,8 +413,8 @@ public class ShardStateActionTests extends OpenSearchTestCase {
         }
         Thread[] clientThreads = new Thread[between(1, 6)];
         int iterationsPerThread = scaledRandomIntBetween(50, 500);
-        Phaser barrier = new Phaser(clientThreads.length + 2); // one for master thread, one for the main thread
-        Thread masterThread = new Thread(() -> {
+        Phaser barrier = new Phaser(clientThreads.length + 2); // one for cluster-manager thread, one for the main thread
+        Thread clusterManagerThread = new Thread(() -> {
             barrier.arriveAndAwaitAdvance();
             while (shutdown.get() == false) {
                 for (CapturingTransport.CapturedRequest request : transport.getCapturedRequestsAndClear()) {

@@ -426,7 +426,7 @@ public class ShardStateActionTests extends OpenSearchTestCase {
                 }
             }
         });
-        masterThread.start();
+        clusterManagerThread.start();
 
         AtomicInteger notifiedResponses = new AtomicInteger();
         for (int t = 0; t < clientThreads.length; t++) {

@@ -463,7 +463,7 @@ public class ShardStateActionTests extends OpenSearchTestCase {
         }
         assertBusy(() -> assertThat(notifiedResponses.get(), equalTo(clientThreads.length * iterationsPerThread)));
         shutdown.set(true);
-        masterThread.join();
+        clusterManagerThread.join();
     }
 
     public void testShardStarted() throws InterruptedException {

@@ -496,14 +496,19 @@ public class ShardStateActionTests extends OpenSearchTestCase {
         return shardRouting;
     }
 
-    private void setUpMasterRetryVerification(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) {
-        shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {
-            DiscoveryNodes.Builder masterBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
-            masterBuilder.masterNodeId(clusterService.state().nodes().getMasterNodes().iterator().next().value.getId());
-            setState(clusterService, ClusterState.builder(clusterService.state()).nodes(masterBuilder));
+    private void setUpClusterManagerRetryVerification(
+        int numberOfRetries,
+        AtomicInteger retries,
+        CountDownLatch latch,
+        LongConsumer retryLoop
+    ) {
+        shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> {
+            DiscoveryNodes.Builder clusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
+            clusterManagerBuilder.masterNodeId(clusterService.state().nodes().getMasterNodes().iterator().next().value.getId());
+            setState(clusterService, ClusterState.builder(clusterService.state()).nodes(clusterManagerBuilder));
         });
 
-        shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> verifyRetry(numberOfRetries, retries, latch, retryLoop));
+        shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> verifyRetry(numberOfRetries, retries, latch, retryLoop));
     }
 
     private void verifyRetry(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) {
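The renamed hook setters give these tests two interception points around `ShardStateAction`'s wait-for-new-cluster-manager retry path. The default wiring installs no-ops, and retry tests swap in real callbacks, as the diff above shows; condensed:

```java
// Default wiring (from setUp above): no-op hooks, retry path uninstrumented.
shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> {});
shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> {});

// Retry tests (via setUpClusterManagerRetryVerification) re-elect a
// cluster-manager before the wait and count the retry afterwards:
shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> verifyRetry(numberOfRetries, retries, latch, retryLoop));
```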
@@ -169,7 +169,7 @@ public class ClusterBootstrapServiceTests extends OpenSearchTestCase {
         testDoesNothingWithSettings(builder().putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()));
     }
 
-    public void testDoesNothingByDefaultOnMasterIneligibleNodes() {
+    public void testDoesNothingByDefaultOnClusterManagerIneligibleNodes() {
         localNode = new DiscoveryNode(
             "local",
             randomAlphaOfLength(10),

@@ -401,7 +401,7 @@ public class ClusterBootstrapServiceTests extends OpenSearchTestCase {
         deterministicTaskQueue.runAllTasks();
     }
 
-    public void testDoesNotBootstrapsOnNonMasterNode() {
+    public void testDoesNotBootstrapsOnNonClusterManagerNode() {
         localNode = new DiscoveryNode(
             "local",
             randomAlphaOfLength(10),

@@ -676,7 +676,7 @@ public class ClusterBootstrapServiceTests extends OpenSearchTestCase {
         );
     }
 
-    public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() {
+    public void testFailBootstrapNonClusterManagerEligibleNodeWithSingleNodeDiscovery() {
         final Settings.Builder settings = Settings.builder()
             .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE)
             .put(NODE_NAME_SETTING.getKey(), localNode.getName())
@@ -173,7 +173,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
         assertThat(logLastFailedJoinAttemptWarningCount.get(), is(5L));
     }
 
-    public void testDescriptionOnMasterIneligibleNodes() {
+    public void testDescriptionOnClusterManagerIneligibleNodes() {
         final DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
             .version(12L)

@@ -284,7 +284,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
             is("this node is unhealthy: unhealthy-info")
         );
 
-        final DiscoveryNode masterNode = new DiscoveryNode(
+        final DiscoveryNode clusterManagerNode = new DiscoveryNode(
             "local",
             buildNewFakeTransportAddress(),
             emptyMap(),

@@ -293,7 +293,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
         );
         clusterState = ClusterState.builder(ClusterName.DEFAULT)
             .version(12L)
-            .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()))
+            .nodes(DiscoveryNodes.builder().add(clusterManagerNode).localNodeId(clusterManagerNode.getId()))
             .build();
 
         assertThat(

@@ -851,9 +851,13 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
             )
         );
 
-        final DiscoveryNode otherMasterNode = new DiscoveryNode("other-master", buildNewFakeTransportAddress(), Version.CURRENT);
-        final DiscoveryNode otherNonMasterNode = new DiscoveryNode(
-            "other-non-master",
+        final DiscoveryNode otherClusterManagerNode = new DiscoveryNode(
+            "other-cluster-manager",
+            buildNewFakeTransportAddress(),
+            Version.CURRENT
+        );
+        final DiscoveryNode otherNonClusterManagerNode = new DiscoveryNode(
+            "other-non-cluster-manager",
             buildNewFakeTransportAddress(),
             emptyMap(),
             new HashSet<>(

@@ -866,7 +870,13 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
 
         String[] configNodeIds = new String[] { "n1", "n2" };
         final ClusterState stateWithOtherNodes = ClusterState.builder(ClusterName.DEFAULT)
-            .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).add(otherMasterNode).add(otherNonMasterNode))
+            .nodes(
+                DiscoveryNodes.builder()
+                    .add(localNode)
+                    .localNodeId(localNode.getId())
+                    .add(otherClusterManagerNode)
+                    .add(otherNonClusterManagerNode)
+            )
             .metadata(
                 Metadata.builder()
                     .coordinationMetadata(

@@ -897,13 +907,13 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
                 + "discovery will continue using [] from hosts providers and ["
                 + localNode
                 + ", "
-                + otherMasterNode
+                + otherClusterManagerNode
                 + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0",
 
             "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
                 + "have discovered [] which is not a quorum; "
                 + "discovery will continue using [] from hosts providers and ["
-                + otherMasterNode
+                + otherClusterManagerNode
                 + ", "
                 + localNode
                 + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"

@@ -586,7 +586,7 @@ public class CoordinationStateTests extends OpenSearchTestCase {
         );
     }
 
-    // scenario when handling a publish request from a master that we already received a newer state from
+    // scenario when handling a publish request from a cluster-manager that we already received a newer state from
     public void testHandlePublishRequestWithSameTermButOlderOrSameVersion() {
         VotingConfiguration initialConfig = VotingConfiguration.of(node1);
         ClusterState state1 = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L);
@@ -613,7 +613,7 @@ public class CoordinationStateTests extends OpenSearchTestCase {
         );
     }
 
-    // scenario when handling a publish request from a fresh master
+    // scenario when handling a publish request from a fresh cluster-manager
     public void testHandlePublishRequestWithTermHigherThanLastAcceptedTerm() {
         VotingConfiguration initialConfig = VotingConfiguration.of(node1);
         StartJoinRequest startJoinRequest1 = new StartJoinRequest(node1, randomLongBetween(1, 5));
@@ -845,7 +845,7 @@ public class CoordinationStateTests extends OpenSearchTestCase {
 
         assertFalse(
             voteCollection.addVote(
-                new DiscoveryNode("master-ineligible", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)
+                new DiscoveryNode("cluster-manager-ineligible", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)
             )
         );
         assertTrue(voteCollection.isEmpty());
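
For readers unfamiliar with the behavior the renamed assertion above exercises: `CoordinationState.VoteCollection.addVote` records a vote only if the voting node carries the cluster-manager role. The following is a minimal standalone sketch of that contract, not part of this commit; it assumes the OpenSearch server module on the classpath, mirrors the `DiscoveryNode` constructor the test itself uses, and the node names and transport addresses are purely illustrative.

```java
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.singleton;

import org.opensearch.Version;
import org.opensearch.cluster.coordination.CoordinationState.VoteCollection;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;
import org.opensearch.common.transport.TransportAddress;

public class VoteCollectionSketch {
    public static void main(String[] args) {
        // No CLUSTER_MANAGER_ROLE: this node must not be able to vote.
        DiscoveryNode ineligible = new DiscoveryNode(
            "cluster-manager-ineligible",
            new TransportAddress(TransportAddress.META_ADDRESS, 9300), // illustrative address
            emptyMap(),
            emptySet(),
            Version.CURRENT
        );
        // CLUSTER_MANAGER_ROLE present: this node's vote counts.
        DiscoveryNode eligible = new DiscoveryNode(
            "cluster-manager-eligible",
            new TransportAddress(TransportAddress.META_ADDRESS, 9301),
            emptyMap(),
            singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
            Version.CURRENT
        );

        VoteCollection votes = new VoteCollection();
        System.out.println(votes.addVote(ineligible)); // false - vote ignored, collection stays empty
        System.out.println(votes.addVote(eligible));   // true  - vote recorded
        System.out.println(votes.isEmpty());           // false
    }
}
```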

@@ -107,7 +107,7 @@ import static org.hamcrest.Matchers.startsWith;
 public class CoordinatorTests extends AbstractCoordinatorTestCase {
 
     /**
-     * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully
+     * This test was added to verify that state recovery is properly reset on a node after it has become cluster-manager and successfully
      * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows:
      * 3 cluster-manager-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back
      * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it.
@@ -164,7 +164,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
         }
     }
 
-    public void testDoesNotElectNonMasterNode() {
+    public void testDoesNotElectNonClusterManagerNode() {
         try (Cluster cluster = new Cluster(randomIntBetween(1, 5), false, Settings.EMPTY)) {
             cluster.runRandomly();
             cluster.stabilise();
@@ -191,7 +191,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
             cluster.clusterNodes.add(newNode1);
             cluster.clusterNodes.add(newNode2);
             cluster.stabilise(
-                // The first pinging discovers the master
+                // The first pinging discovers the cluster-manager
                 defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING)
                     // One message delay to send a join
                     + DEFAULT_DELAY_VARIABILITY
@@ -627,7 +627,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
             cluster.clusterNodes.add(newNode2);
             cluster.clusterNodes.add(newNode3);
             cluster.stabilise(
-                // The first pinging discovers the master
+                // The first pinging discovers the cluster-manager
                 defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING)
                     // One message delay to send a join
                     + DEFAULT_DELAY_VARIABILITY
@@ -1096,7 +1096,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
      * does not notice the node disconnecting, it is important for the node not to be turned back into a follower but try
      * and join the leader again.
      */
-    public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() {
+    public void testStayCandidateAfterReceivingFollowerCheckFromKnownClusterManager() {
         try (Cluster cluster = new Cluster(2, false, Settings.EMPTY)) {
             cluster.runRandomly();
             cluster.stabilise();
@@ -1121,23 +1121,23 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
         }
     }
 
-    public void testAppliesNoMasterBlockWritesByDefault() {
-        testAppliesNoMasterBlock(null, NO_MASTER_BLOCK_WRITES);
+    public void testAppliesNoClusterManagerBlockWritesByDefault() {
+        testAppliesNoClusterManagerBlock(null, NO_MASTER_BLOCK_WRITES);
     }
 
-    public void testAppliesNoMasterBlockWritesIfConfigured() {
-        testAppliesNoMasterBlock("write", NO_MASTER_BLOCK_WRITES);
+    public void testAppliesNoClusterManagerBlockWritesIfConfigured() {
+        testAppliesNoClusterManagerBlock("write", NO_MASTER_BLOCK_WRITES);
     }
 
-    public void testAppliesNoMasterBlockAllIfConfigured() {
-        testAppliesNoMasterBlock("all", NO_MASTER_BLOCK_ALL);
+    public void testAppliesNoClusterManagerBlockAllIfConfigured() {
+        testAppliesNoClusterManagerBlock("all", NO_MASTER_BLOCK_ALL);
     }
 
-    public void testAppliesNoMasterBlockMetadataWritesIfConfigured() {
-        testAppliesNoMasterBlock("metadata_write", NO_MASTER_BLOCK_METADATA_WRITES);
+    public void testAppliesNoClusterManagerBlockMetadataWritesIfConfigured() {
+        testAppliesNoClusterManagerBlock("metadata_write", NO_MASTER_BLOCK_METADATA_WRITES);
     }
 
-    private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock expectedBlock) {
+    private void testAppliesNoClusterManagerBlock(String noClusterManagerBlockSetting, ClusterBlock expectedBlock) {
         try (Cluster cluster = new Cluster(3)) {
             cluster.runRandomly();
             cluster.stabilise();
@@ -1145,7 +1145,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
             final ClusterNode leader = cluster.getAnyLeader();
             leader.submitUpdateTask("update NO_CLUSTER_MANAGER_BLOCK_SETTING", cs -> {
                 final Builder settingsBuilder = Settings.builder().put(cs.metadata().persistentSettings());
-                settingsBuilder.put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), noMasterBlockSetting);
+                settingsBuilder.put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), noClusterManagerBlockSetting);
                 return ClusterState.builder(cs)
                     .metadata(Metadata.builder(cs.metadata()).persistentSettings(settingsBuilder.build()))
                    .build();
@@ -1175,12 +1175,12 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
         }
     }
 
-    public void testNodeCannotJoinIfJoinValidationFailsOnMaster() {
+    public void testNodeCannotJoinIfJoinValidationFailsOnClusterManager() {
         try (Cluster cluster = new Cluster(randomIntBetween(1, 3))) {
             cluster.runRandomly();
             cluster.stabilise();
 
-            // check that if node join validation fails on master, the nodes can't join
+            // check that if node join validation fails on cluster-manager, the nodes can't join
             List<ClusterNode> addedNodes = cluster.addNodes(randomIntBetween(1, 2));
             final Set<DiscoveryNode> validatedNodes = new HashSet<>();
             cluster.getAnyLeader().extraJoinValidators.add((discoveryNode, clusterState) -> {
@@ -1305,7 +1305,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
         }
     }
 
-    public void testFollowerRemovedIfUnableToSendRequestsToMaster() {
+    public void testFollowerRemovedIfUnableToSendRequestsToClusterManager() {
         try (Cluster cluster = new Cluster(3)) {
             cluster.runRandomly();
             cluster.stabilise();
@@ -1333,7 +1333,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
             cluster.clearBlackholedConnections();
 
             cluster.stabilise(
-                // time for the disconnected node to find the master again
+                // time for the disconnected node to find the cluster-manager again
                 defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2
                     // time for joining
                     + 4 * DEFAULT_DELAY_VARIABILITY
@@ -1679,7 +1679,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
         }
     }
 
-    public void testReconfiguresToExcludeMasterIneligibleNodesInVotingConfig() {
+    public void testReconfiguresToExcludeClusterManagerIneligibleNodesInVotingConfig() {
         try (Cluster cluster = new Cluster(3)) {
             cluster.runRandomly();
             cluster.stabilise();
@@ -1698,7 +1698,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
             final boolean chosenNodeIsLeader = chosenNode == cluster.getAnyLeader();
             final long termBeforeRestart = cluster.getAnyNode().coordinator.getCurrentTerm();
 
-            logger.info("--> restarting [{}] as a master-ineligible node", chosenNode);
+            logger.info("--> restarting [{}] as a cluster-manager-ineligible node", chosenNode);
 
             chosenNode.close();
             cluster.clusterNodes.replaceAll(

@@ -685,7 +685,7 @@ public class FollowersCheckerTests extends OpenSearchTestCase {
         }
     }
 
-    public void testPreferMasterNodes() {
+    public void testPreferClusterManagerNodes() {
         List<DiscoveryNode> nodes = randomNodes(10);
         DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder();
         nodes.forEach(dn -> discoNodesBuilder.add(dn));

@@ -161,8 +161,9 @@ public class JoinTaskExecutorTests extends OpenSearchTestCase {
     }
 
     public void testUpdatesNodeWithNewRoles() throws Exception {
-        // Node roles vary by version, and new roles are suppressed for BWC. This means we can receive a join from a node that's already
-        // in the cluster but with a different set of roles: the node didn't change roles, but the cluster state came via an older master.
+        // Node roles vary by version, and new roles are suppressed for BWC.
+        // This means we can receive a join from a node that's already in the cluster but with a different set of roles:
+        // the node didn't change roles, but the cluster state came via an older cluster-manager.
         // In this case we must properly process its join to ensure that the roles are correct.
 
         final AllocationService allocationService = mock(AllocationService.class);
@@ -171,7 +172,7 @@ public class JoinTaskExecutorTests extends OpenSearchTestCase {
 
         final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null);
 
-        final DiscoveryNode masterNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT);
+        final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT);
 
         final DiscoveryNode actualNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT);
         final DiscoveryNode bwcNode = new DiscoveryNode(
@@ -186,7 +187,13 @@ public class JoinTaskExecutorTests extends OpenSearchTestCase {
             actualNode.getVersion()
         );
         final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
-            .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(bwcNode))
+            .nodes(
+                DiscoveryNodes.builder()
+                    .add(clusterManagerNode)
+                    .localNodeId(clusterManagerNode.getId())
+                    .masterNodeId(clusterManagerNode.getId())
+                    .add(bwcNode)
+            )
             .build();
 
         final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute(

@@ -44,12 +44,12 @@ import static org.hamcrest.Matchers.sameInstance;
 
 public class NoMasterBlockServiceTests extends OpenSearchTestCase {
 
-    private NoMasterBlockService noMasterBlockService;
+    private NoMasterBlockService noClusterManagerBlockService;
     private ClusterSettings clusterSettings;
 
     private void createService(Settings settings) {
         clusterSettings = new ClusterSettings(settings, BUILT_IN_CLUSTER_SETTINGS);
-        noMasterBlockService = new NoMasterBlockService(settings, clusterSettings);
+        noClusterManagerBlockService = new NoMasterBlockService(settings, clusterSettings);
     }
 
     private void assertDeprecatedWarningEmitted() {
@@ -61,22 +61,22 @@ public class NoMasterBlockServiceTests extends OpenSearchTestCase {
 
     public void testBlocksWritesByDefault() {
         createService(Settings.EMPTY);
-        assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
+        assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
     }
 
     public void testBlocksWritesIfConfiguredBySetting() {
         createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build());
-        assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
+        assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
     }
 
     public void testBlocksAllIfConfiguredBySetting() {
         createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build());
-        assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL));
+        assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL));
     }
 
     public void testBlocksMetadataWritesIfConfiguredBySetting() {
         createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build());
-        assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES));
+        assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES));
     }
 
     public void testRejectsInvalidSetting() {
@@ -88,12 +88,12 @@ public class NoMasterBlockServiceTests extends OpenSearchTestCase {
 
     public void testSettingCanBeUpdated() {
         createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build());
-        assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL));
+        assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL));
 
         clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build());
-        assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
+        assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
 
         clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build());
-        assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES));
+        assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES));
     }
 }
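
The tests above only rename identifiers; the behavior they cover is the no-cluster-manager block setting choosing which `ClusterBlock` is applied while no cluster-manager is elected, and swapping it dynamically. A minimal sketch, not part of this commit, assuming `NO_CLUSTER_MANAGER_BLOCK_SETTING` is the public constant on `NoMasterBlockService` that this test file imports statically:

```java
import org.opensearch.cluster.coordination.NoMasterBlockService;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;

public class NoClusterManagerBlockSketch {
    public static void main(String[] args) {
        // Accepted values: "all" blocks reads and writes, "write" blocks writes,
        // "metadata_write" blocks only cluster metadata writes.
        Settings settings = Settings.builder()
            .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write")
            .build();
        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        NoMasterBlockService service = new NoMasterBlockService(settings, clusterSettings);

        // The block that would be applied while no cluster-manager is elected.
        System.out.println(service.getNoMasterBlock());

        // The setting is dynamic, so applying new settings swaps the block at runtime,
        // exactly what testSettingCanBeUpdated() asserts.
        clusterSettings.applySettings(
            Settings.builder().put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build()
        );
        System.out.println(service.getNoMasterBlock());
    }
}
```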

@@ -98,7 +98,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
 
     private static ThreadPool threadPool;
 
-    private MasterService masterService;
+    private MasterService clusterManagerService;
     private Coordinator coordinator;
     private DeterministicTaskQueue deterministicTaskQueue;
     private Transport transport;
@@ -117,7 +117,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
     @After
     public void tearDown() throws Exception {
         super.tearDown();
-        masterService.close();
+        clusterManagerService.close();
     }
 
     private static ClusterState initialState(DiscoveryNode localNode, long term, long version, VotingConfiguration config) {
@@ -138,61 +138,68 @@ public class NodeJoinTests extends OpenSearchTestCase {
             .build();
     }
 
-    private void setupFakeMasterServiceAndCoordinator(long term, ClusterState initialState, NodeHealthService nodeHealthService) {
+    private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterState initialState, NodeHealthService nodeHealthService) {
         deterministicTaskQueue = new DeterministicTaskQueue(
             Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(),
             random()
         );
         final ThreadPool fakeThreadPool = deterministicTaskQueue.getThreadPool();
-        FakeThreadPoolMasterService fakeMasterService = new FakeThreadPoolMasterService(
+        FakeThreadPoolMasterService fakeClusterManagerService = new FakeThreadPoolMasterService(
             "test_node",
             "test",
             fakeThreadPool,
             deterministicTaskQueue::scheduleNow
         );
-        setupMasterServiceAndCoordinator(term, initialState, fakeMasterService, fakeThreadPool, Randomness.get(), nodeHealthService);
-        fakeMasterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
+        setupClusterManagerServiceAndCoordinator(
+            term,
+            initialState,
+            fakeClusterManagerService,
+            fakeThreadPool,
+            Randomness.get(),
+            nodeHealthService
+        );
+        fakeClusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
             coordinator.handlePublishRequest(new PublishRequest(event.state()));
             publishListener.onResponse(null);
         });
-        fakeMasterService.start();
+        fakeClusterManagerService.start();
     }
 
-    private void setupRealMasterServiceAndCoordinator(long term, ClusterState initialState) {
-        MasterService masterService = new MasterService(
+    private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterState initialState) {
+        MasterService clusterManagerService = new MasterService(
             Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(),
             new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
             threadPool
         );
         AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialState);
-        masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
+        clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
             clusterStateRef.set(event.state());
             publishListener.onResponse(null);
         });
-        setupMasterServiceAndCoordinator(
+        setupClusterManagerServiceAndCoordinator(
             term,
             initialState,
-            masterService,
+            clusterManagerService,
             threadPool,
             new Random(Randomness.get().nextLong()),
             () -> new StatusInfo(HEALTHY, "healthy-info")
         );
-        masterService.setClusterStateSupplier(clusterStateRef::get);
-        masterService.start();
+        clusterManagerService.setClusterStateSupplier(clusterStateRef::get);
+        clusterManagerService.start();
     }
 
-    private void setupMasterServiceAndCoordinator(
+    private void setupClusterManagerServiceAndCoordinator(
         long term,
         ClusterState initialState,
-        MasterService masterService,
+        MasterService clusterManagerService,
         ThreadPool threadPool,
         Random random,
         NodeHealthService nodeHealthService
     ) {
-        if (this.masterService != null || coordinator != null) {
+        if (this.clusterManagerService != null || coordinator != null) {
             throw new IllegalStateException("method setupMasterServiceAndCoordinator can only be called once");
         }
-        this.masterService = masterService;
+        this.clusterManagerService = clusterManagerService;
         CapturingTransport capturingTransport = new CapturingTransport() {
             @Override
             protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode destination) {
@@ -224,7 +231,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
             transportService,
             writableRegistry(),
             OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY),
-            masterService,
+            clusterManagerService,
             () -> new InMemoryPersistedState(term, initialState),
             r -> emptyList(),
             new NoOpClusterApplier(),
@@ -245,14 +252,14 @@ public class NodeJoinTests extends OpenSearchTestCase {
         return newNode(i, randomBoolean());
     }
 
-    protected DiscoveryNode newNode(int i, boolean master) {
+    protected DiscoveryNode newNode(int i, boolean clusterManager) {
         final Set<DiscoveryNodeRole> roles;
-        if (master) {
+        if (clusterManager) {
             roles = singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
         } else {
             roles = Collections.emptySet();
         }
-        final String prefix = master ? "master_" : "data_";
+        final String prefix = clusterManager ? "cluster_manager_" : "data_";
         return new DiscoveryNode(prefix + i, i + "", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
     }
 
@@ -323,7 +330,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(randomFrom(node0, node1))),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -347,7 +354,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node1)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -362,12 +369,12 @@ public class NodeJoinTests extends OpenSearchTestCase {
         assertFalse(isLocalNodeElectedMaster());
     }
 
-    public void testJoinWithHigherTermButBetterStateStillElectsMasterThroughSelfJoin() {
+    public void testJoinWithHigherTermButBetterStateStillElectsClusterManagerThroughSelfJoin() {
         DiscoveryNode node0 = newNode(0, true);
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -384,7 +391,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -404,7 +411,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -426,7 +433,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node2 = newNode(2, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node2)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -458,7 +465,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -481,7 +488,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
             "knownNodeName"
         );
 
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             buildStateWithVotingConfigExclusion(initialNode, initialTerm, initialVersion, votingConfigExclusion),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -507,7 +514,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         );
 
         assertTrue(
-            MasterServiceTests.discoveryState(masterService)
+            MasterServiceTests.discoveryState(clusterManagerService)
                 .getVotingConfigExclusions()
                 .stream()
                 .anyMatch(
@@ -583,7 +590,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -604,7 +611,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = newNode(1, true);
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node1)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -626,27 +633,31 @@ public class NodeJoinTests extends OpenSearchTestCase {
     }
 
     public void testConcurrentJoining() {
-        List<DiscoveryNode> masterNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5))
+        List<DiscoveryNode> clusterManagerNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5))
             .mapToObj(nodeId -> newNode(nodeId, true))
             .collect(Collectors.toList());
-        List<DiscoveryNode> otherNodes = IntStream.rangeClosed(masterNodes.size() + 1, masterNodes.size() + 1 + randomIntBetween(0, 5))
-            .mapToObj(nodeId -> newNode(nodeId, false))
-            .collect(Collectors.toList());
-        List<DiscoveryNode> allNodes = Stream.concat(masterNodes.stream(), otherNodes.stream()).collect(Collectors.toList());
+        List<DiscoveryNode> otherNodes = IntStream.rangeClosed(
+            clusterManagerNodes.size() + 1,
+            clusterManagerNodes.size() + 1 + randomIntBetween(0, 5)
+        ).mapToObj(nodeId -> newNode(nodeId, false)).collect(Collectors.toList());
+        List<DiscoveryNode> allNodes = Stream.concat(clusterManagerNodes.stream(), otherNodes.stream()).collect(Collectors.toList());
 
-        DiscoveryNode localNode = masterNodes.get(0);
+        DiscoveryNode localNode = clusterManagerNodes.get(0);
         VotingConfiguration votingConfiguration = new VotingConfiguration(
-            randomValueOtherThan(singletonList(localNode), () -> randomSubsetOf(randomIntBetween(1, masterNodes.size()), masterNodes))
-                .stream()
-                .map(DiscoveryNode::getId)
-                .collect(Collectors.toSet())
+            randomValueOtherThan(
+                singletonList(localNode),
+                () -> randomSubsetOf(randomIntBetween(1, clusterManagerNodes.size()), clusterManagerNodes)
+            ).stream().map(DiscoveryNode::getId).collect(Collectors.toSet())
         );
 
         logger.info("Voting configuration: {}", votingConfiguration);
 
         long initialTerm = randomLongBetween(1, 10);
         long initialVersion = randomLongBetween(1, 10);
-        setupRealMasterServiceAndCoordinator(initialTerm, initialState(localNode, initialTerm, initialVersion, votingConfiguration));
+        setupRealClusterManagerServiceAndCoordinator(
+            initialTerm,
+            initialState(localNode, initialTerm, initialVersion, votingConfiguration)
+        );
         long newTerm = initialTerm + randomLongBetween(1, 10);
 
         // we need at least a quorum of voting nodes with a correct term and worse state
@@ -735,10 +746,10 @@ public class NodeJoinTests extends OpenSearchTestCase {
             throw new RuntimeException(e);
         }
 
        assertTrue(MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster());
         for (DiscoveryNode successfulNode : successfulNodes) {
             assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode));
-            assertFalse(successfulNode + " voted for master", coordinator.missingJoinVoteFrom(successfulNode));
+            assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode));
         }
     }
 
@@ -749,7 +760,7 @@ public class NodeJoinTests extends OpenSearchTestCase {
         DiscoveryNode node1 = new DiscoveryNode("master1", "1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT);
         long initialTerm = 1;
         long initialVersion = 1;
-        setupFakeMasterServiceAndCoordinator(
+        setupFakeClusterManagerServiceAndCoordinator(
             initialTerm,
             initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)),
             () -> new StatusInfo(HEALTHY, "healthy-info")
@@ -765,10 +776,10 @@ public class NodeJoinTests extends OpenSearchTestCase {
     }
 
     private boolean isLocalNodeElectedMaster() {
-        return MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster();
+        return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster();
     }
 
     private boolean clusterStateHasNode(DiscoveryNode node) {
-        return node.equals(MasterServiceTests.discoveryState(masterService).nodes().get(node.getId()));
+        return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId()));
     }
 }
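
The renamed `newNode(int, boolean clusterManager)` helper above makes the key point of the whole rename visible: the only thing that makes a test node cluster-manager-eligible is whether `DiscoveryNodeRole.CLUSTER_MANAGER_ROLE` is in its role set. A standalone sketch of the same idea, not part of this commit; it reuses the constructor the test itself calls, and the fake transport address replaces the test framework's `buildNewFakeTransportAddress()` helper for illustration:

```java
import static java.util.Collections.emptyMap;

import java.util.Collections;
import java.util.Set;

import org.opensearch.Version;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;
import org.opensearch.common.transport.TransportAddress;

public class NewNodeSketch {
    // Mirrors the renamed helper: the role set alone decides cluster-manager eligibility.
    static DiscoveryNode newNode(int i, boolean clusterManager) {
        Set<DiscoveryNodeRole> roles = clusterManager
            ? Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)
            : Collections.emptySet();
        String prefix = clusterManager ? "cluster_manager_" : "data_";
        // Illustrative address standing in for buildNewFakeTransportAddress().
        TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 9300 + i);
        return new DiscoveryNode(prefix + i, i + "", address, emptyMap(), roles, Version.CURRENT);
    }

    public static void main(String[] args) {
        System.out.println(newNode(0, true).isMasterNode());  // true: cluster-manager-eligible
        System.out.println(newNode(1, false).isMasterNode()); // false: data-only node
    }
}
```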

@@ -447,7 +447,7 @@ public class PublicationTests extends OpenSearchTestCase {
         );
     }
 
-    public void testPublishingToMastersFirst() {
+    public void testPublishingToClusterManagersFirst() {
         VotingConfiguration singleNodeConfig = VotingConfiguration.of(n1);
         initializeCluster(singleNodeConfig);
 

@@ -223,14 +223,14 @@ public class ReconfiguratorTests extends OpenSearchTestCase {
         boolean autoShrinkVotingConfiguration,
         VotingConfiguration expectedConfig
     ) {
-        final DiscoveryNode master = liveNodes.stream().sorted(Comparator.comparing(DiscoveryNode::getId)).findFirst().get();
-        check(liveNodes, retired, master.getId(), config, autoShrinkVotingConfiguration, expectedConfig);
+        final DiscoveryNode clusterManager = liveNodes.stream().sorted(Comparator.comparing(DiscoveryNode::getId)).findFirst().get();
+        check(liveNodes, retired, clusterManager.getId(), config, autoShrinkVotingConfiguration, expectedConfig);
     }
 
     private void check(
         Set<DiscoveryNode> liveNodes,
         Set<String> retired,
-        String masterId,
+        String clusterManagerId,
         VotingConfiguration config,
         boolean autoShrinkVotingConfiguration,
         VotingConfiguration expectedConfig
@@ -239,14 +239,14 @@ public class ReconfiguratorTests extends OpenSearchTestCase {
             Settings.builder().put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), autoShrinkVotingConfiguration).build()
         );
 
-        final DiscoveryNode master = liveNodes.stream().filter(n -> n.getId().equals(masterId)).findFirst().get();
-        final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, master, config);
+        final DiscoveryNode clusterManager = liveNodes.stream().filter(n -> n.getId().equals(clusterManagerId)).findFirst().get();
+        final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, clusterManager, config);
         assertEquals(
             new ParameterizedMessage(
-                "[liveNodes={}, retired={}, master={}, config={}, autoShrinkVotingConfiguration={}]",
+                "[liveNodes={}, retired={}, clusterManager={}, config={}, autoShrinkVotingConfiguration={}]",
                 liveNodes,
                 retired,
-                master,
+                clusterManager,
                 config,
                 autoShrinkVotingConfiguration
             ).getFormattedMessage(),
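
For context on the `reconfigure(liveNodes, retired, clusterManager, config)` call renamed above: the reconfigurator adapts the voting configuration to the set of live cluster-manager-eligible nodes. A minimal sketch, not part of this commit, assuming `Reconfigurator` is constructed from `Settings` plus `ClusterSettings` and that a `VotingConfiguration` can be built from a set of node ids (as the test code above suggests); node ids and addresses are illustrative:

```java
import static java.util.Collections.emptyMap;
import static java.util.Collections.singleton;

import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import org.opensearch.Version;
import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration;
import org.opensearch.cluster.coordination.Reconfigurator;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.transport.TransportAddress;

public class ReconfiguratorSketch {
    public static void main(String[] args) {
        // Three live cluster-manager-eligible nodes: n1, n2, n3.
        Set<DiscoveryNode> liveNodes = IntStream.rangeClosed(1, 3)
            .mapToObj(i -> new DiscoveryNode(
                "n" + i,
                new TransportAddress(TransportAddress.META_ADDRESS, 9300 + i),
                emptyMap(),
                singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
                Version.CURRENT
            ))
            .collect(Collectors.toSet());

        Reconfigurator reconfigurator = new Reconfigurator(
            Settings.EMPTY,
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
        );
        DiscoveryNode clusterManager = liveNodes.iterator().next();

        // Start from a single-node voting configuration; with all three nodes live
        // the reconfigurator should grow the configuration toward the live set.
        VotingConfiguration current = new VotingConfiguration(Set.of("n1"));
        VotingConfiguration adapted = reconfigurator.reconfigure(liveNodes, Set.of(), clusterManager, current);
        System.out.println(adapted.getNodeIds());
    }
}
```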

@@ -85,10 +85,13 @@ public class ClusterHealthAllocationTests extends OpenSearchAllocationTestCase {
         assertEquals(ClusterHealthStatus.GREEN, getClusterHealthStatus(clusterState));
     }
 
-    private ClusterState addNode(ClusterState clusterState, String nodeName, boolean isMaster) {
+    private ClusterState addNode(ClusterState clusterState, String nodeName, boolean isClusterManager) {
         DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes());
         nodeBuilder.add(
-            newNode(nodeName, Collections.singleton(isMaster ? DiscoveryNodeRole.CLUSTER_MANAGER_ROLE : DiscoveryNodeRole.DATA_ROLE))
+            newNode(
+                nodeName,
+                Collections.singleton(isClusterManager ? DiscoveryNodeRole.CLUSTER_MANAGER_ROLE : DiscoveryNodeRole.DATA_ROLE)
+            )
         );
         return ClusterState.builder(clusterState).nodes(nodeBuilder).build();
     }

@@ -157,11 +157,11 @@ public class ClusterStateHealthTests extends OpenSearchTestCase {
             }
         });
 
-        logger.info("--> submit task to restore master");
+        logger.info("--> submit task to restore cluster-manager");
         ClusterState currentState = clusterService.getClusterApplierService().state();
         clusterService.getClusterApplierService()
             .onNewClusterState(
-                "restore master",
+                "restore cluster-manager",
                 () -> ClusterState.builder(currentState)
                     .nodes(DiscoveryNodes.builder(currentState.nodes()).masterNodeId(currentState.nodes().getLocalNodeId()))
                     .build(),
@@ -184,7 +184,7 @@ public class ClusterStateHealthTests extends OpenSearchTestCase {
 
         assertFalse(listener.isDone());
 
-        logger.info("--> realising task to restore master");
+        logger.info("--> realising task to restore cluster-manager");
         applyLatch.countDown();
         listener.get();
     }

@@ -145,7 +145,7 @@ public class AutoExpandReplicasTests extends OpenSearchTestCase {
 
         try {
             List<DiscoveryNode> allNodes = new ArrayList<>();
-            DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master
+            DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager
             allNodes.add(localNode);
             int numDataNodes = randomIntBetween(3, 5);
             List<DiscoveryNode> dataNodes = new ArrayList<>(numDataNodes);
@@ -246,7 +246,7 @@ public class AutoExpandReplicasTests extends OpenSearchTestCase {
                 VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.V_1_2_1),
                 DiscoveryNodeRole.CLUSTER_MANAGER_ROLE,
                 DiscoveryNodeRole.DATA_ROLE
-            ); // local node is the master
+            ); // local node is the cluster-manager
             allNodes.add(oldNode);
             ClusterState state = ClusterStateCreationUtils.state(oldNode, oldNode, allNodes.toArray(new DiscoveryNode[0]));
 

@@ -108,14 +108,14 @@ public class DiscoveryNodesTests extends OpenSearchTestCase {
         assertThat(discoveryNodes.resolveNodes(new String[0]), arrayContainingInAnyOrder(allNodes));
         assertThat(discoveryNodes.resolveNodes("_all"), arrayContainingInAnyOrder(allNodes));
 
-        final String[] nonMasterNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
+        final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
             .map(n -> n.value)
             .filter(n -> n.isMasterNode() == false)
             .map(DiscoveryNode::getId)
             .toArray(String[]::new);
-        assertThat(discoveryNodes.resolveNodes("_all", "master:false"), arrayContainingInAnyOrder(nonMasterNodes));
+        assertThat(discoveryNodes.resolveNodes("_all", "cluster_manager:false"), arrayContainingInAnyOrder(nonClusterManagerNodes));
 
-        assertThat(discoveryNodes.resolveNodes("master:false", "_all"), arrayContainingInAnyOrder(allNodes));
+        assertThat(discoveryNodes.resolveNodes("cluster_manager:false", "_all"), arrayContainingInAnyOrder(allNodes));
     }
 
     public void testCoordinatorOnlyNodes() {
@@ -135,7 +135,7 @@ public class DiscoveryNodesTests extends OpenSearchTestCase {
 
         assertThat(discoveryNodes.resolveNodes("coordinating_only:true"), arrayContainingInAnyOrder(coordinatorOnlyNodes));
         assertThat(
-            discoveryNodes.resolveNodes("_all", "data:false", "ingest:false", "master:false"),
+            discoveryNodes.resolveNodes("_all", "data:false", "ingest:false", "cluster_manager:false"),
             arrayContainingInAnyOrder(coordinatorOnlyNodes)
         );
         assertThat(discoveryNodes.resolveNodes("_all", "coordinating_only:false"), arrayContainingInAnyOrder(nonCoordinatorOnlyNodes));
@@ -175,7 +175,7 @@ public class DiscoveryNodesTests extends OpenSearchTestCase {
         assertThat(resolvedNodesIds, equalTo(expectedNodesIds));
     }
 
-    public void testMastersFirst() {
+    public void testClusterManagersFirst() {
         final List<DiscoveryNode> inputNodes = randomNodes(10);
         final DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
         inputNodes.forEach(discoBuilder::add);
@@ -254,19 +254,19 @@ public class DiscoveryNodesTests extends OpenSearchTestCase {
             nodesB.add(node);
         }
 
-        DiscoveryNode masterA = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesA);
-        DiscoveryNode masterB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB);
+        DiscoveryNode clusterManagerA = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesA);
+        DiscoveryNode clusterManagerB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB);
 
         DiscoveryNodes.Builder builderA = DiscoveryNodes.builder();
         nodesA.stream().forEach(builderA::add);
-        final String masterAId = masterA == null ? null : masterA.getId();
-        builderA.masterNodeId(masterAId);
+        final String clusterManagerAId = clusterManagerA == null ? null : clusterManagerA.getId();
+        builderA.masterNodeId(clusterManagerAId);
         builderA.localNodeId(RandomPicks.randomFrom(random(), nodesA).getId());
 
         DiscoveryNodes.Builder builderB = DiscoveryNodes.builder();
         nodesB.stream().forEach(builderB::add);
-        final String masterBId = masterB == null ? null : masterB.getId();
-        builderB.masterNodeId(masterBId);
+        final String clusterManagerBId = clusterManagerB == null ? null : clusterManagerB.getId();
+        builderB.masterNodeId(clusterManagerBId);
         builderB.localNodeId(RandomPicks.randomFrom(random(), nodesB).getId());
 
         final DiscoveryNodes discoNodesA = builderA.build();
@@ -276,18 +276,18 @@ public class DiscoveryNodesTests extends OpenSearchTestCase {
 
         DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA);
 
-        if (masterA == null) {
+        if (clusterManagerA == null) {
             assertThat(delta.previousClusterManagerNode(), nullValue());
         } else {
-            assertThat(delta.previousClusterManagerNode().getId(), equalTo(masterAId));
+            assertThat(delta.previousClusterManagerNode().getId(), equalTo(clusterManagerAId));
         }
-        if (masterB == null) {
+        if (clusterManagerB == null) {
             assertThat(delta.newMasterNode(), nullValue());
         } else {
-            assertThat(delta.newMasterNode().getId(), equalTo(masterBId));
+            assertThat(delta.newMasterNode().getId(), equalTo(clusterManagerBId));
         }
 
-        if (Objects.equals(masterAId, masterBId)) {
+        if (Objects.equals(clusterManagerAId, clusterManagerBId)) {
             assertFalse(delta.masterNodeChanged());
         } else {
             assertTrue(delta.masterNodeChanged());
@@ -306,6 +306,32 @@ public class DiscoveryNodesTests extends OpenSearchTestCase {
         assertThat(delta.removedNodes().size(), equalTo(removedNodes.size()));
     }
 
+    // Validate using the deprecated 'master' role in the node filter can get correct result.
+    public void testDeprecatedMasterNodeFilter() {
+        final DiscoveryNodes discoveryNodes = buildDiscoveryNodes();
+
+        final String[] allNodes = StreamSupport.stream(discoveryNodes.spliterator(), false)
+            .map(DiscoveryNode::getId)
+            .toArray(String[]::new);
+
+        final String[] clusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
+            .map(n -> n.value)
+            .filter(n -> n.isMasterNode() == true)
+            .map(DiscoveryNode::getId)
+            .toArray(String[]::new);
+
+        final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
+            .map(n -> n.value)
+            .filter(n -> n.isMasterNode() == false)
+            .map(DiscoveryNode::getId)
+            .toArray(String[]::new);
+
+        assertThat(discoveryNodes.resolveNodes("cluster_manager:true"), arrayContainingInAnyOrder(clusterManagerNodes));
+        assertThat(discoveryNodes.resolveNodes("master:true"), arrayContainingInAnyOrder(clusterManagerNodes));
+        assertThat(discoveryNodes.resolveNodes("_all", "master:false"), arrayContainingInAnyOrder(nonClusterManagerNodes));
+        assertThat(discoveryNodes.resolveNodes("master:false", "_all"), arrayContainingInAnyOrder(allNodes));
+    }
+
     private static AtomicInteger idGenerator = new AtomicInteger();
 
     private static List<DiscoveryNode> randomNodes(final int numNodes) {
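
The new `testDeprecatedMasterNodeFilter()` above is the test this commit adds for the deprecated `master:true`/`master:false` node filter. A minimal self-contained sketch of the behavior it asserts, not part of this commit; it assumes the OpenSearch server module on the classpath, and node ids and addresses are illustrative:

```java
import static java.util.Collections.emptyMap;
import static java.util.Collections.singleton;

import org.opensearch.Version;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.transport.TransportAddress;

public class DeprecatedMasterFilterSketch {
    public static void main(String[] args) {
        DiscoveryNode clusterManager = new DiscoveryNode(
            "cm",
            new TransportAddress(TransportAddress.META_ADDRESS, 9300),
            emptyMap(),
            singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
            Version.CURRENT
        );
        DiscoveryNode dataNode = new DiscoveryNode(
            "data",
            new TransportAddress(TransportAddress.META_ADDRESS, 9301),
            emptyMap(),
            singleton(DiscoveryNodeRole.DATA_ROLE),
            Version.CURRENT
        );
        DiscoveryNodes nodes = DiscoveryNodes.builder()
            .add(clusterManager)
            .add(dataNode)
            .localNodeId("cm")
            .masterNodeId("cm")
            .build();

        // The new spelling and the deprecated one should resolve to the same node ids.
        System.out.println(String.join(",", nodes.resolveNodes("cluster_manager:true"))); // cm
        System.out.println(String.join(",", nodes.resolveNodes("master:true")));          // cm (deprecated spelling)
        System.out.println(String.join(",", nodes.resolveNodes("_all", "master:false"))); // data
    }
}
```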

@@ -101,7 +101,7 @@ public class BatchedRerouteServiceTests extends OpenSearchTestCase {
 
     public void testBatchesReroutesTogetherAtPriorityOfHighestSubmittedReroute() throws BrokenBarrierException, InterruptedException {
         final CyclicBarrier cyclicBarrier = new CyclicBarrier(2);
-        clusterService.submitStateUpdateTask("block master service", new ClusterStateUpdateTask() {
+        clusterService.submitStateUpdateTask("block cluster-manager service", new ClusterStateUpdateTask() {
             @Override
             public ClusterState execute(ClusterState currentState) throws Exception {
                 cyclicBarrier.await(); // notify test that we are blocked
@@ -115,7 +115,7 @@ public class BatchedRerouteServiceTests extends OpenSearchTestCase {
             }
         });
 
-        cyclicBarrier.await(); // wait for master thread to be blocked
+        cyclicBarrier.await(); // wait for cluster-manager thread to be blocked
 
         final AtomicBoolean rerouteExecuted = new AtomicBoolean();
         final BatchedRerouteService batchedRerouteService = new BatchedRerouteService(clusterService, (s, r) -> {
@@ -194,7 +194,7 @@ public class BatchedRerouteServiceTests extends OpenSearchTestCase {
         actions.forEach(threadPool.generic()::execute);
         assertTrue(tasksSubmittedCountDown.await(10, TimeUnit.SECONDS));
 
-        cyclicBarrier.await(); // allow master thread to continue;
+        cyclicBarrier.await(); // allow cluster-manager thread to continue;
         assertTrue(tasksCompletedCountDown.await(10, TimeUnit.SECONDS)); // wait for reroute to complete
         assertTrue(rerouteExecuted.get()); // see above for assertion that it's only called once
     }

@@ -774,14 +774,14 @@ public class OperationRoutingTests extends OpenSearchTestCase {
             );
             allNodes[i++] = node;
         }
-        DiscoveryNode master = new DiscoveryNode(
-            "master",
+        DiscoveryNode clusterManager = new DiscoveryNode(
+            "cluster-manager",
             buildNewFakeTransportAddress(),
             Collections.emptyMap(),
             Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
             Version.CURRENT
         );
-        allNodes[i] = master;
+        allNodes[i] = clusterManager;
         return allNodes;
     }
 

@@ -230,7 +230,7 @@ public class FailedNodeRoutingTests extends OpenSearchAllocationTestCase {
 
     public ClusterState randomInitialClusterState() {
         List<DiscoveryNode> allNodes = new ArrayList<>();
-        DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master
+        DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager
         allNodes.add(localNode);
         // at least two nodes that have the data role so that we can allocate shards
         allNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE));

@@ -170,7 +170,7 @@ public class InSyncAllocationIdTests extends OpenSearchAllocationTestCase {
 
     /**
      * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica.
-     * The primary instructs master to fail replica before acknowledging write to client. In the meanwhile, the node of the replica was
+     * The primary instructs cluster-manager to fail replica before acknowledging write to client. In the meanwhile, the node of the replica was
      * removed from the cluster (disassociateDeadNodes). This means that the ShardRouting of the replica was failed, but it's allocation
      * id is still part of the in-sync set. We have to make sure that the failShard request from the primary removes the allocation id
      * from the in-sync set.
@@ -204,8 +204,8 @@ public class InSyncAllocationIdTests extends OpenSearchAllocationTestCase {
 
     /**
      * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica.
-     * The primary instructs master to fail replica before acknowledging write to client. In the meanwhile, primary fails for an unrelated
-     * reason. Master now batches both requests to fail primary and replica. We have to make sure that only the allocation id of the primary
+     * The primary instructs cluster-manager to fail replica before acknowledging write to client. In the meanwhile, primary fails for an unrelated
+     * reason. Cluster-manager now batches both requests to fail primary and replica. We have to make sure that only the allocation id of the primary
      * is kept in the in-sync allocation set before we acknowledge request to client. Otherwise we would acknowledge a write that made it
      * into the primary but not the replica but the replica is still considered non-stale.
      */

@@ -1072,7 +1072,7 @@ public class DiskThresholdDeciderTests extends OpenSearchAllocationTestCase {
 
         RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build();
 
-        logger.info("--> adding one master node, one data node");
+        logger.info("--> adding one cluster-manager node, one data node");
         DiscoveryNode discoveryNode1 = new DiscoveryNode(
             "",
             "node1",
@@ -1222,9 +1222,9 @@ public class DiskThresholdDeciderTests extends OpenSearchAllocationTestCase {
             .build();
         RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build();
 
-        DiscoveryNode masterNode = new DiscoveryNode(
-            "master",
-            "master",
+        DiscoveryNode clusterManagerNode = new DiscoveryNode(
+            "cluster-manager",
+            "cluster-manager",
             buildNewFakeTransportAddress(),
             emptyMap(),
             singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
@@ -1240,7 +1240,7 @@ public class DiskThresholdDeciderTests extends OpenSearchAllocationTestCase {
         );
         DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(dataNode);
         if (randomBoolean()) {
-            discoveryNodesBuilder.add(masterNode);
+            discoveryNodesBuilder.add(clusterManagerNode);
         }
         DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
 

@@ -199,9 +199,9 @@ public class RestoreInProgressAllocationDeciderTests extends OpenSearchAllocatio
         RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build();
 
         DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
-            .add(newNode("master", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)))
-            .localNodeId("master")
-            .masterNodeId("master")
+            .add(newNode("cluster-manager", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)))
+            .localNodeId("cluster-manager")
+            .masterNodeId("cluster-manager")
             .build();
 
         ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)

@@ -104,7 +104,7 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
         super.tearDown();
     }
 
-    private TimedClusterApplierService createTimedClusterService(boolean makeMaster) {
+    private TimedClusterApplierService createTimedClusterService(boolean makeClusterManager) {
         DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         TimedClusterApplierService timedClusterApplierService = new TimedClusterApplierService(
             Settings.builder().put("cluster.name", "ClusterApplierServiceTests").build(),
@@ -118,7 +118,7 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
                     DiscoveryNodes.builder()
                         .add(localNode)
                         .localNodeId(localNode.getId())
-                        .masterNodeId(makeMaster ? localNode.getId() : null)
+                        .masterNodeId(makeClusterManager ? localNode.getId() : null)
                 )
                 .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
                 .build()
@@ -292,19 +292,19 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
         }
     }
 
-    public void testLocalNodeMasterListenerCallbacks() {
+    public void testLocalNodeClusterManagerListenerCallbacks() {
         TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false);
 
-        AtomicBoolean isMaster = new AtomicBoolean();
+        AtomicBoolean isClusterManager = new AtomicBoolean();
         timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() {
             @Override
             public void onClusterManager() {
-                isMaster.set(true);
+                isClusterManager.set(true);
             }
 
             @Override
             public void offClusterManager() {
-                isMaster.set(false);
+                isClusterManager.set(false);
             }
         });
 
@@ -313,7 +313,7 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
         DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
         state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
         setState(timedClusterApplierService, state);
-        assertThat(isMaster.get(), is(true));
+        assertThat(isClusterManager.get(), is(true));
 
         nodes = state.nodes();
         nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null);
@@ -322,11 +322,11 @@ public class ClusterApplierServiceTests extends OpenSearchTestCase {
             .nodes(nodesBuilder)
             .build();
         setState(timedClusterApplierService, state);
-        assertThat(isMaster.get(), is(false));
+        assertThat(isClusterManager.get(), is(false));
         nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
         state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
         setState(timedClusterApplierService, state);
-        assertThat(isMaster.get(), is(true));
+        assertThat(isClusterManager.get(), is(true));
 
         timedClusterApplierService.close();
     }
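
The hunk above shows the already-renamed `onClusterManager()`/`offClusterManager()` callbacks of `LocalNodeMasterListener` in action. A minimal sketch of a listener implementation, not part of this commit, assuming the interface lives at `org.opensearch.cluster.LocalNodeMasterListener` and is registered through `ClusterService#addLocalNodeMasterListener` as in the test above:

```java
import org.opensearch.cluster.LocalNodeMasterListener;

// Tracks whether the local node is currently the elected cluster-manager.
final class ElectionTracker implements LocalNodeMasterListener {
    private volatile boolean clusterManager;

    @Override
    public void onClusterManager() {
        // Invoked when the local node is elected cluster-manager.
        clusterManager = true;
    }

    @Override
    public void offClusterManager() {
        // Invoked when the local node stops being the cluster-manager.
        clusterManager = false;
    }

    boolean isClusterManager() {
        return clusterManager;
    }
}
```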
@@ -121,9 +121,9 @@ public class MasterServiceTests extends OpenSearchTestCase {
relativeTimeInMillis = randomLongBetween(0L, 1L << 62);
}

-private MasterService createMasterService(boolean makeMaster) {
+private MasterService createClusterManagerService(boolean makeClusterManager) {
final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-final MasterService masterService = new MasterService(
+final MasterService clusterManagerService = new MasterService(
Settings.builder()
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
.put(Node.NODE_NAME_SETTING.getKey(), "test_node")

@@ -133,26 +133,29 @@ public class MasterServiceTests extends OpenSearchTestCase {
);
final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
.nodes(
-DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(makeMaster ? localNode.getId() : null)
+DiscoveryNodes.builder()
+.add(localNode)
+.localNodeId(localNode.getId())
+.masterNodeId(makeClusterManager ? localNode.getId() : null)
)
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
.build();
final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
-masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
+clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
clusterStateRef.set(event.state());
publishListener.onResponse(null);
});
-masterService.setClusterStateSupplier(clusterStateRef::get);
-masterService.start();
-return masterService;
+clusterManagerService.setClusterStateSupplier(clusterStateRef::get);
+clusterManagerService.start();
+return clusterManagerService;
}

-public void testMasterAwareExecution() throws Exception {
-final MasterService nonMaster = createMasterService(false);
+public void testClusterManagerAwareExecution() throws Exception {
+final MasterService nonClusterManager = createClusterManagerService(false);

final boolean[] taskFailed = { false };
final CountDownLatch latch1 = new CountDownLatch(1);
-nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
+nonClusterManager.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
latch1.countDown();

@@ -167,10 +170,10 @@ public class MasterServiceTests extends OpenSearchTestCase {
});

latch1.await();
-assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);
+assertTrue("cluster state update task was executed on a non-cluster-manager", taskFailed[0]);

final CountDownLatch latch2 = new CountDownLatch(1);
-nonMaster.submitStateUpdateTask("test", new LocalClusterUpdateTask() {
+nonClusterManager.submitStateUpdateTask("test", new LocalClusterUpdateTask() {
@Override
public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
taskFailed[0] = false;

@@ -185,13 +188,13 @@ public class MasterServiceTests extends OpenSearchTestCase {
}
});
latch2.await();
-assertFalse("non-master cluster state update task was not executed", taskFailed[0]);
+assertFalse("non-cluster-manager cluster state update task was not executed", taskFailed[0]);

-nonMaster.close();
+nonClusterManager.close();
}

public void testThreadContext() throws InterruptedException {
-final MasterService master = createMasterService(true);
+final MasterService clusterManager = createClusterManagerService(true);
final CountDownLatch latch = new CountDownLatch(1);

try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) {

@@ -203,9 +206,9 @@ public class MasterServiceTests extends OpenSearchTestCase {
threadPool.getThreadContext().putHeader(expectedHeaders);

final TimeValue ackTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000));
-final TimeValue masterTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000));
+final TimeValue clusterManagerTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000));

-master.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {
+clusterManager.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {
@Override
public ClusterState execute(ClusterState currentState) {
assertTrue(threadPool.getThreadContext().isSystemContext());

@@ -249,7 +252,7 @@ public class MasterServiceTests extends OpenSearchTestCase {

@Override
public TimeValue timeout() {
-return masterTimeout;
+return clusterManagerTimeout;
}

@Override

@@ -277,7 +280,7 @@ public class MasterServiceTests extends OpenSearchTestCase {

latch.await();

-master.close();
+clusterManager.close();
}

/*

@@ -289,8 +292,8 @@ public class MasterServiceTests extends OpenSearchTestCase {
final CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean published = new AtomicBoolean();

-try (MasterService masterService = createMasterService(true)) {
-masterService.submitStateUpdateTask(
+try (MasterService clusterManagerService = createClusterManagerService(true)) {
+clusterManagerService.submitStateUpdateTask(
"testClusterStateTaskListenerThrowingExceptionIsOkay",
new Object(),
ClusterStateTaskConfig.build(Priority.NORMAL),

@@ -418,8 +421,8 @@ public class MasterServiceTests extends OpenSearchTestCase {
)
);

-try (MasterService masterService = createMasterService(true)) {
-masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
+try (MasterService clusterManagerService = createClusterManagerService(true)) {
+clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis();

@@ -434,7 +437,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
fail();
}
});
-masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis();

@@ -449,7 +452,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
@Override
public void onFailure(String source, Exception e) {}
});
-masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis();

@@ -466,7 +469,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
fail();
}
});
-masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;

@@ -614,7 +617,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
}
};

-try (MasterService masterService = createMasterService(true)) {
+try (MasterService clusterManagerService = createClusterManagerService(true)) {
final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {

@@ -629,7 +632,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size());
final TaskExecutor executor = assignment.v1();
if (tasks.size() == 1) {
-masterService.submitStateUpdateTask(
+clusterManagerService.submitStateUpdateTask(
threadName,
tasks.stream().findFirst().get(),
ClusterStateTaskConfig.build(randomFrom(Priority.values())),

@@ -639,7 +642,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
} else {
Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
tasks.forEach(t -> taskListeners.put(t, listener));
-masterService.submitStateUpdateTasks(
+clusterManagerService.submitStateUpdateTasks(
threadName,
taskListeners,
ClusterStateTaskConfig.build(randomFrom(Priority.values())),

@@ -693,8 +696,8 @@ public class MasterServiceTests extends OpenSearchTestCase {
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<AssertionError> assertionRef = new AtomicReference<>();

-try (MasterService masterService = createMasterService(true)) {
-masterService.submitStateUpdateTask(
+try (MasterService clusterManagerService = createClusterManagerService(true)) {
+clusterManagerService.submitStateUpdateTask(
"testBlockingCallInClusterStateTaskListenerFails",
new Object(),
ClusterStateTaskConfig.build(Priority.NORMAL),

@@ -785,7 +788,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
);

try (
-MasterService masterService = new MasterService(
+MasterService clusterManagerService = new MasterService(
Settings.builder()
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
.put(Node.NODE_NAME_SETTING.getKey(), "test_node")

@@ -807,7 +810,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
.build();
final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
-masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
+clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
if (event.source().contains("test5")) {
relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(
Settings.EMPTY

@@ -822,12 +825,12 @@ public class MasterServiceTests extends OpenSearchTestCase {
clusterStateRef.set(event.state());
publishListener.onResponse(null);
});
-masterService.setClusterStateSupplier(clusterStateRef::get);
-masterService.start();
+clusterManagerService.setClusterStateSupplier(clusterStateRef::get);
+clusterManagerService.start();

final CountDownLatch latch = new CountDownLatch(6);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
-masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += randomLongBetween(

@@ -850,7 +853,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
});

processedFirstTask.await();
-masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(

@@ -869,7 +872,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
latch.countDown();
}
});
-masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(

@@ -888,7 +891,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
fail();
}
});
-masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(

@@ -907,7 +910,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
fail();
}
});
-masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();

@@ -923,7 +926,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
fail();
}
});
-masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();

@@ -941,7 +944,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
});
// Additional update task to make sure all previous logging made it to the loggerName
// We don't check logging for this one since there is no guarantee that it will occur before our check
-masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() {
+clusterManagerService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;

@@ -968,7 +971,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
try (
-MasterService masterService = new MasterService(
+MasterService clusterManagerService = new MasterService(
Settings.builder()
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
.put(Node.NODE_NAME_SETTING.getKey(), "test_node")

@@ -983,9 +986,9 @@ public class MasterServiceTests extends OpenSearchTestCase {
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
.build();
final AtomicReference<ClusterStatePublisher> publisherRef = new AtomicReference<>();
-masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al));
-masterService.setClusterStateSupplier(() -> initialClusterState);
-masterService.start();
+clusterManagerService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al));
+clusterManagerService.setClusterStateSupplier(() -> initialClusterState);
+clusterManagerService.start();

// check that we don't time out before even committing the cluster state
{

@@ -997,7 +1000,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
)
);

-masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
+clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).build();

@@ -1052,7 +1055,7 @@ public class MasterServiceTests extends OpenSearchTestCase {
ackListener.onNodeAck(node3, null);
});

-masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
+clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).build();

@@ -1096,10 +1099,10 @@ public class MasterServiceTests extends OpenSearchTestCase {
}

/**
-* Returns the cluster state that the master service uses (and that is provided by the discovery layer)
+* Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer)
*/
-public static ClusterState discoveryState(MasterService masterService) {
-return masterService.state();
+public static ClusterState discoveryState(MasterService clusterManagerService) {
+return clusterManagerService.state();
}

}

@@ -155,15 +155,15 @@ public abstract class AbstractDisruptionTestCase extends OpenSearchIntegTestCase
return client(node).admin().cluster().prepareState().setLocal(true).get().getState();
}

-void assertNoMaster(final String node) throws Exception {
-assertNoMaster(node, null, TimeValue.timeValueSeconds(30));
+void assertNoClusterManager(final String node) throws Exception {
+assertNoClusterManager(node, null, TimeValue.timeValueSeconds(30));
}

-void assertNoMaster(final String node, TimeValue maxWaitTime) throws Exception {
-assertNoMaster(node, null, maxWaitTime);
+void assertNoClusterManager(final String node, TimeValue maxWaitTime) throws Exception {
+assertNoClusterManager(node, null, maxWaitTime);
}

-void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception {
+void assertNoClusterManager(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception {
assertBusy(() -> {
ClusterState state = getNodeClusterState(node);
final DiscoveryNodes nodes = state.nodes();

@@ -179,26 +179,34 @@ public abstract class AbstractDisruptionTestCase extends OpenSearchIntegTestCase
}, maxWaitTime.getMillis(), TimeUnit.MILLISECONDS);
}

-void assertDifferentMaster(final String node, final String oldMasterNode) throws Exception {
+void assertDifferentClusterManager(final String node, final String oldClusterManagerNode) throws Exception {
assertBusy(() -> {
ClusterState state = getNodeClusterState(node);
-String masterNode = null;
+String clusterManagerNode = null;
if (state.nodes().getMasterNode() != null) {
-masterNode = state.nodes().getMasterNode().getName();
+clusterManagerNode = state.nodes().getMasterNode().getName();
}
-logger.trace("[{}] master is [{}]", node, state.nodes().getMasterNode());
-assertThat("node [" + node + "] still has [" + masterNode + "] as master", oldMasterNode, not(equalTo(masterNode)));
+logger.trace("[{}] cluster-manager is [{}]", node, state.nodes().getMasterNode());
+assertThat(
+"node [" + node + "] still has [" + clusterManagerNode + "] as cluster-manager",
+oldClusterManagerNode,
+not(equalTo(clusterManagerNode))
+);
}, 30, TimeUnit.SECONDS);
}

-void assertMaster(String masterNode, List<String> nodes) throws Exception {
+void assertClusterManager(String clusterManagerNode, List<String> nodes) throws Exception {
assertBusy(() -> {
for (String node : nodes) {
ClusterState state = getNodeClusterState(node);
String failMsgSuffix = "cluster_state:\n" + state;
assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size()));
-String otherMasterNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null;
-assertThat("wrong master on node [" + node + "]. " + failMsgSuffix, otherMasterNodeName, equalTo(masterNode));
+String otherClusterManagerNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null;
+assertThat(
+"wrong cluster-manager on node [" + node + "]. " + failMsgSuffix,
+otherClusterManagerNodeName,
+equalTo(clusterManagerNode)
+);
}
});
}

@@ -70,7 +70,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase {

private TransportService transportService;
private NamedWriteableRegistry namedWriteableRegistry;
-private MasterService masterService;
+private MasterService clusterManagerService;
private ClusterApplier clusterApplier;
private ThreadPool threadPool;
private ClusterSettings clusterSettings;

@@ -93,7 +93,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase {
threadPool = mock(ThreadPool.class);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null);
-masterService = mock(MasterService.class);
+clusterManagerService = mock(MasterService.class);
namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
clusterApplier = mock(ClusterApplier.class);
clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);

@@ -112,7 +112,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase {
transportService,
namedWriteableRegistry,
null,
-masterService,
+clusterManagerService,
clusterApplier,
clusterSettings,
plugins,

@@ -137,7 +137,7 @@ public class HandshakingTransportAddressConnectorTests extends OpenSearchTestCase
terminate(threadPool);
}

-public void testConnectsToMasterNode() throws InterruptedException {
+public void testConnectsToClusterManagerNode() throws InterruptedException {
final CountDownLatch completionLatch = new CountDownLatch(1);
final SetOnce<DiscoveryNode> receivedNode = new SetOnce<>();

@@ -190,7 +190,7 @@ public class HandshakingTransportAddressConnectorTests extends OpenSearchTestCase
}
}

-public void testDoesNotConnectToNonMasterNode() throws InterruptedException {
+public void testDoesNotConnectToNonClusterManagerNode() throws InterruptedException {
remoteNode = new DiscoveryNode("remote-node", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
discoveryAddress = getDiscoveryAddress();
remoteClusterName = "local-cluster";

@@ -147,7 +147,7 @@ public class PeerFinderTests extends OpenSearchTestCase {
listener.onResponse(discoveryNode);
return;
} else {
-listener.onFailure(new OpenSearchException("non-master node " + discoveryNode));
+listener.onFailure(new OpenSearchException("non-cluster-manager node " + discoveryNode));
return;
}
}

@@ -165,20 +165,20 @@ public class PeerFinderTests extends OpenSearchTestCase {
}

class TestPeerFinder extends PeerFinder {
-DiscoveryNode discoveredMasterNode;
-OptionalLong discoveredMasterTerm = OptionalLong.empty();
+DiscoveryNode discoveredClusterManagerNode;
+OptionalLong discoveredClusterManagerTerm = OptionalLong.empty();

TestPeerFinder(Settings settings, TransportService transportService, TransportAddressConnector transportAddressConnector) {
super(settings, transportService, transportAddressConnector, PeerFinderTests.this::resolveConfiguredHosts);
}

@Override
-protected void onActiveClusterManagerFound(DiscoveryNode masterNode, long term) {
+protected void onActiveClusterManagerFound(DiscoveryNode clusterManagerNode, long term) {
assert holdsLock() == false : "PeerFinder lock held in error";
-assertThat(discoveredMasterNode, nullValue());
-assertFalse(discoveredMasterTerm.isPresent());
-discoveredMasterNode = masterNode;
-discoveredMasterTerm = OptionalLong.of(term);
+assertThat(discoveredClusterManagerNode, nullValue());
+assertFalse(discoveredClusterManagerTerm.isPresent());
+discoveredClusterManagerNode = clusterManagerNode;
+discoveredClusterManagerTerm = OptionalLong.of(term);
}

@Override

@@ -335,8 +335,8 @@ public class PeerFinderTests extends OpenSearchTestCase {
assertFoundPeers();
}

-public void testDoesNotAddNonMasterEligibleNodesFromUnicastHostsList() {
-final DiscoveryNode nonMasterNode = new DiscoveryNode(
+public void testDoesNotAddNonClusterManagerEligibleNodesFromUnicastHostsList() {
+final DiscoveryNode nonClusterManagerNode = new DiscoveryNode(
"node-from-hosts-list",
buildNewFakeTransportAddress(),
emptyMap(),

@@ -344,8 +344,8 @@ public class PeerFinderTests extends OpenSearchTestCase {
Version.CURRENT
);

-providedAddresses.add(nonMasterNode.getAddress());
-transportAddressConnector.addReachableNode(nonMasterNode);
+providedAddresses.add(nonClusterManagerNode.getAddress());
+transportAddressConnector.addReachableNode(nonClusterManagerNode);

peerFinder.activate(lastAcceptedNodes);
runAllRunnableTasks();

@@ -423,7 +423,7 @@ public class PeerFinderTests extends OpenSearchTestCase {
assertFoundPeers(sourceNode, otherKnownNode);
}

-public void testDoesNotAddReachableNonMasterEligibleNodesFromIncomingRequests() {
+public void testDoesNotAddReachableNonClusterManagerEligibleNodesFromIncomingRequests() {
final DiscoveryNode sourceNode = new DiscoveryNode(
"request-source",
buildNewFakeTransportAddress(),

@@ -494,7 +494,7 @@ public class PeerFinderTests extends OpenSearchTestCase {
}

public void testDelegatesRequestHandlingWhenInactive() {
-final DiscoveryNode masterNode = newDiscoveryNode("master-node");
+final DiscoveryNode clusterManagerNode = newDiscoveryNode("cluster-manager-node");
final DiscoveryNode sourceNode = newDiscoveryNode("request-source");
transportAddressConnector.addReachableNode(sourceNode);

@@ -502,9 +502,9 @@ public class PeerFinderTests extends OpenSearchTestCase {

final long term = randomNonNegativeLong();
peerFinder.setCurrentTerm(term);
-peerFinder.deactivate(masterNode);
+peerFinder.deactivate(clusterManagerNode);

-final PeersResponse expectedResponse = new PeersResponse(Optional.of(masterNode), Collections.emptyList(), term);
+final PeersResponse expectedResponse = new PeersResponse(Optional.of(clusterManagerNode), Collections.emptyList(), term);
final PeersResponse peersResponse = peerFinder.handlePeersRequest(new PeersRequest(sourceNode, Collections.emptyList()));
assertThat(peersResponse, equalTo(expectedResponse));
}

@@ -590,7 +590,7 @@ public class PeerFinderTests extends OpenSearchTestCase {
assertFoundPeers(otherNode, discoveredNode);
}

-public void testAddsReachableMasterFromResponse() {
+public void testAddsReachableClusterManagerFromResponse() {
final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list");
providedAddresses.add(otherNode.getAddress());
transportAddressConnector.addReachableNode(otherNode);

@@ -599,21 +599,21 @@ public class PeerFinderTests extends OpenSearchTestCase {
runAllRunnableTasks();

assertFoundPeers(otherNode);
-final DiscoveryNode discoveredMaster = newDiscoveryNode("discovered-master");
+final DiscoveryNode discoveredClusterManager = newDiscoveryNode("discovered-cluster-manager");

respondToRequests(node -> {
assertThat(node, is(otherNode));
-return new PeersResponse(Optional.of(discoveredMaster), emptyList(), randomNonNegativeLong());
+return new PeersResponse(Optional.of(discoveredClusterManager), emptyList(), randomNonNegativeLong());
});

-transportAddressConnector.addReachableNode(discoveredMaster);
+transportAddressConnector.addReachableNode(discoveredClusterManager);
runAllRunnableTasks();
-assertFoundPeers(otherNode, discoveredMaster);
-assertThat(peerFinder.discoveredMasterNode, nullValue());
-assertFalse(peerFinder.discoveredMasterTerm.isPresent());
+assertFoundPeers(otherNode, discoveredClusterManager);
+assertThat(peerFinder.discoveredClusterManagerNode, nullValue());
+assertFalse(peerFinder.discoveredClusterManagerTerm.isPresent());
}

-public void testHandlesDiscoveryOfMasterFromResponseFromMaster() {
+public void testHandlesDiscoveryOfClusterManagerFromResponseFromClusterManager() {
final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list");
providedAddresses.add(otherNode.getAddress());
transportAddressConnector.addReachableNode(otherNode);

@@ -631,8 +631,8 @@ public class PeerFinderTests extends OpenSearchTestCase {

runAllRunnableTasks();
assertFoundPeers(otherNode);
-assertThat(peerFinder.discoveredMasterNode, is(otherNode));
-assertThat(peerFinder.discoveredMasterTerm, is(OptionalLong.of(term)));
+assertThat(peerFinder.discoveredClusterManagerNode, is(otherNode));
+assertThat(peerFinder.discoveredClusterManagerTerm, is(OptionalLong.of(term)));
}

public void testOnlyRequestsPeersOncePerRoundButDoesRetryNextRound() {

@@ -524,8 +524,8 @@ public class NodeEnvironmentTests extends OpenSearchTestCase {
Settings settings = buildEnvSettings(Settings.EMPTY);
Index index = new Index("test", "testUUID");

-// build settings using same path.data as original but without data and master roles
-Settings noDataNoMasterSettings = Settings.builder()
+// build settings using same path.data as original but without data and cluster-manager roles
+Settings noDataNoClusterManagerSettings = Settings.builder()
.put(settings)
.put(
NodeRoles.removeRoles(

@@ -535,8 +535,8 @@ public class NodeEnvironmentTests extends OpenSearchTestCase {
)
.build();

-// test that we can create data=false and master=false with no meta information
-newNodeEnvironment(noDataNoMasterSettings).close();
+// test that we can create data=false and cluster_manager=false with no meta information
+newNodeEnvironment(noDataNoClusterManagerSettings).close();

Path indexPath;
try (NodeEnvironment env = newNodeEnvironment(settings)) {

@@ -546,7 +546,7 @@ public class NodeEnvironmentTests extends OpenSearchTestCase {
indexPath = env.indexPaths(index)[0];
}

-verifyFailsOnMetadata(noDataNoMasterSettings, indexPath);
+verifyFailsOnMetadata(noDataNoClusterManagerSettings, indexPath);

// build settings using same path.data as original but without data role
Settings noDataSettings = nonDataNode(settings);

@@ -563,15 +563,15 @@ public class NodeEnvironmentTests extends OpenSearchTestCase {
verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName);

// assert that we get the stricter message on meta-data when both conditions fail
-verifyFailsOnMetadata(noDataNoMasterSettings, indexPath);
+verifyFailsOnMetadata(noDataNoClusterManagerSettings, indexPath);

-// build settings using same path.data as original but without master role
-Settings noMasterSettings = nonMasterNode(settings);
+// build settings using same path.data as original but without cluster-manager role
+Settings noClusterManagerSettings = nonMasterNode(settings);

-// test that we can create master=false env regardless of data.
-newNodeEnvironment(noMasterSettings).close();
+// test that we can create cluster_manager=false env regardless of data.
+newNodeEnvironment(noClusterManagerSettings).close();

-// test that we can create data=true, master=true env. Also remove state dir to leave only shard data for following asserts
+// test that we can create data=true, cluster_manager=true env. Also remove state dir to leave only shard data for following asserts
try (NodeEnvironment env = newNodeEnvironment(settings)) {
for (Path path : env.indexPaths(index)) {
Files.delete(path.resolve(MetadataStateFormat.STATE_DIR_NAME));

@@ -580,7 +580,7 @@ public class NodeEnvironmentTests extends OpenSearchTestCase {

// assert that we fail on shard data even without the metadata dir.
verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName);
-verifyFailsOnShardData(noDataNoMasterSettings, indexPath, shardDataDirName);
+verifyFailsOnShardData(noDataNoClusterManagerSettings, indexPath, shardDataDirName);
}

private void verifyFailsOnShardData(Settings settings, Path indexPath, String shardDataDirName) {

@@ -597,7 +597,7 @@ public class NodeEnvironmentTests extends OpenSearchTestCase {
private void verifyFailsOnMetadata(Settings settings, Path indexPath) {
IllegalStateException ex = expectThrows(
IllegalStateException.class,
-"Must fail creating NodeEnvironment on a data path that has index metadata if node does not have data and master roles",
+"Must fail creating NodeEnvironment on a data path that has index metadata if node does not have data and cluster-manager roles",
() -> newNodeEnvironment(settings).close()
);

@@ -75,18 +75,18 @@ import static org.hamcrest.Matchers.not;
public class NodeRepurposeCommandTests extends OpenSearchTestCase {

private static final Index INDEX = new Index("testIndex", "testUUID");
-private Settings dataMasterSettings;
+private Settings dataClusterManagerSettings;
private Environment environment;
private Path[] nodePaths;
-private Settings dataNoMasterSettings;
-private Settings noDataNoMasterSettings;
-private Settings noDataMasterSettings;
+private Settings dataNoClusterManagerSettings;
+private Settings noDataNoClusterManagerSettings;
+private Settings noDataClusterManagerSettings;

@Before
public void createNodePaths() throws IOException {
-dataMasterSettings = buildEnvSettings(Settings.EMPTY);
-environment = TestEnvironment.newEnvironment(dataMasterSettings);
-try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataMasterSettings, environment)) {
+dataClusterManagerSettings = buildEnvSettings(Settings.EMPTY);
+environment = TestEnvironment.newEnvironment(dataClusterManagerSettings);
+try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataClusterManagerSettings, environment)) {
nodePaths = nodeEnvironment.nodeDataPaths();
final String nodeId = randomAlphaOfLength(10);
try (

@@ -95,36 +95,36 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase {
nodeId,
xContentRegistry(),
BigArrays.NON_RECYCLING_INSTANCE,
-new ClusterSettings(dataMasterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+new ClusterSettings(dataClusterManagerSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
() -> 0L
).createWriter()
) {
writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE);
}
}
-dataNoMasterSettings = nonMasterNode(dataMasterSettings);
-noDataNoMasterSettings = removeRoles(
-dataMasterSettings,
+dataNoClusterManagerSettings = nonMasterNode(dataClusterManagerSettings);
+noDataNoClusterManagerSettings = removeRoles(
+dataClusterManagerSettings,
Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)))
);

-noDataMasterSettings = masterNode(nonDataNode(dataMasterSettings));
+noDataClusterManagerSettings = masterNode(nonDataNode(dataClusterManagerSettings));
}

public void testEarlyExitNoCleanup() throws Exception {
-createIndexDataFiles(dataMasterSettings, randomInt(10), randomBoolean());
+createIndexDataFiles(dataClusterManagerSettings, randomInt(10), randomBoolean());

-verifyNoQuestions(dataMasterSettings, containsString(NO_CLEANUP));
-verifyNoQuestions(dataNoMasterSettings, containsString(NO_CLEANUP));
+verifyNoQuestions(dataClusterManagerSettings, containsString(NO_CLEANUP));
+verifyNoQuestions(dataNoClusterManagerSettings, containsString(NO_CLEANUP));
}

public void testNothingToCleanup() throws Exception {
-verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND));
-verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND));
+verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND));
+verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND));

-Environment environment = TestEnvironment.newEnvironment(noDataMasterSettings);
+Environment environment = TestEnvironment.newEnvironment(noDataClusterManagerSettings);
if (randomBoolean()) {
-try (NodeEnvironment env = new NodeEnvironment(noDataMasterSettings, environment)) {
+try (NodeEnvironment env = new NodeEnvironment(noDataClusterManagerSettings, environment)) {
try (
PersistedClusterStateService.Writer writer = OpenSearchNodeCommand.createPersistedClusterStateService(
Settings.EMPTY,

@@ -136,19 +136,24 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase {
}
}

-verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND));
-verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND));
+verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND));
+verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND));

-createIndexDataFiles(dataMasterSettings, 0, randomBoolean());
+createIndexDataFiles(dataClusterManagerSettings, 0, randomBoolean());

-verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND));
+verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND));

}

public void testLocked() throws IOException {
-try (NodeEnvironment env = new NodeEnvironment(dataMasterSettings, TestEnvironment.newEnvironment(dataMasterSettings))) {
+try (
+NodeEnvironment env = new NodeEnvironment(
+dataClusterManagerSettings,
+TestEnvironment.newEnvironment(dataClusterManagerSettings)
+)
+) {
assertThat(
-expectThrows(OpenSearchException.class, () -> verifyNoQuestions(noDataNoMasterSettings, null)).getMessage(),
+expectThrows(OpenSearchException.class, () -> verifyNoQuestions(noDataNoClusterManagerSettings, null)).getMessage(),
containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG)
);
}

@@ -158,7 +163,7 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase {
int shardCount = randomIntBetween(1, 10);
boolean verbose = randomBoolean();
boolean hasClusterState = randomBoolean();
-createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState);
+createIndexDataFiles(dataClusterManagerSettings, shardCount, hasClusterState);

String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, environment.dataFiles().length * shardCount, 0);

@@ -168,22 +173,22 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase {
conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState)
);

-verifyUnchangedOnAbort(noDataNoMasterSettings, outputMatcher, verbose);
+verifyUnchangedOnAbort(noDataNoClusterManagerSettings, outputMatcher, verbose);

// verify test setup
-expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoMasterSettings, environment).close());
+expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoClusterManagerSettings, environment).close());

-verifySuccess(noDataNoMasterSettings, outputMatcher, verbose);
+verifySuccess(noDataNoClusterManagerSettings, outputMatcher, verbose);

// verify cleaned.
-new NodeEnvironment(noDataNoMasterSettings, environment).close();
+new NodeEnvironment(noDataNoClusterManagerSettings, environment).close();
}

public void testCleanupShardData() throws Exception {
int shardCount = randomIntBetween(1, 10);
boolean verbose = randomBoolean();
boolean hasClusterState = randomBoolean();
-createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState);
+createIndexDataFiles(dataClusterManagerSettings, shardCount, hasClusterState);

Matcher<String> matcher = allOf(
containsString(NodeRepurposeCommand.shardMessage(environment.dataFiles().length * shardCount, 1)),

@@ -192,15 +197,15 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase {
conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState)
);

-verifyUnchangedOnAbort(noDataMasterSettings, matcher, verbose);
+verifyUnchangedOnAbort(noDataClusterManagerSettings, matcher, verbose);

// verify test setup
-expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataMasterSettings, environment).close());
+expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataClusterManagerSettings, environment).close());

-verifySuccess(noDataMasterSettings, matcher, verbose);
+verifySuccess(noDataClusterManagerSettings, matcher, verbose);

// verify clean.
-new NodeEnvironment(noDataMasterSettings, environment).close();
+new NodeEnvironment(noDataClusterManagerSettings, environment).close();
}

static void verifySuccess(Settings settings, Matcher<String> outputMatcher, boolean verbose) throws Exception {

@@ -433,7 +433,7 @@ public class AsyncShardFetchTests extends OpenSearchTestCase {
try {
entry = simulations.get(nodeId);
if (entry == null) {
-// we are simulating a master node switch, wait for it to not be null
+// we are simulating a cluster-manager node switch, wait for it to not be null
assertBusy(() -> assertTrue(simulations.containsKey(nodeId)));
}
assert entry != null;

@@ -129,13 +129,13 @@ public class GatewayServiceTests extends OpenSearchTestCase {
GatewayService service = createService(Settings.builder());
ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask();
String nodeId = randomAlphaOfLength(10);
-DiscoveryNode masterNode = DiscoveryNode.createLocal(
+DiscoveryNode clusterManagerNode = DiscoveryNode.createLocal(
settings(Version.CURRENT).put(masterNode()).build(),
new TransportAddress(TransportAddress.META_ADDRESS, 9300),
nodeId
);
ClusterState stateWithBlock = ClusterState.builder(ClusterName.DEFAULT)
-.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(clusterManagerNode).build())
.blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build())
.build();

@@ -86,7 +86,7 @@ import static org.mockito.Mockito.when;

public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase {

-private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata, boolean masterEligible) {
+private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) {
Metadata metadata = Metadata.builder().put(indexMetadata, false).build();

RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build();

@@ -94,11 +94,11 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase {
return ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metadata(metadata)
.routingTable(routingTable)
-.nodes(generateDiscoveryNodes(masterEligible))
+.nodes(generateDiscoveryNodes(clusterManagerEligible))
.build();
}

-private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, boolean masterEligible) {
+private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) {
AllocationService strategy = createAllocationService(
Settings.builder()
.put("cluster.routing.allocation.node_concurrent_recoveries", 100)

@@ -108,7 +108,7 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase {
.build()
);

-ClusterState oldClusterState = clusterStateWithUnassignedIndex(indexMetadata, masterEligible);
+ClusterState oldClusterState = clusterStateWithUnassignedIndex(indexMetadata, clusterManagerEligible);
RoutingTable routingTable = strategy.reroute(oldClusterState, "reroute").routingTable();

Metadata metadataNewClusterState = Metadata.builder().put(oldClusterState.metadata().index("test"), false).build();

@@ -120,8 +120,8 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase {
.build();
}

-private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata indexMetadata, boolean masterEligible) {
-ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, masterEligible);
+private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) {
+ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible);

Metadata metadataNewClusterState = Metadata.builder()
.put(

@@ -142,8 +142,12 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase {
.build();
}

-private ClusterState clusterStateWithReplicatedClosedIndex(IndexMetadata indexMetadata, boolean masterEligible, boolean assigned) {
-ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, masterEligible);
+private ClusterState clusterStateWithReplicatedClosedIndex(
+IndexMetadata indexMetadata,
+boolean clusterManagerEligible,
+boolean assigned
+) {
+ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible);

Metadata metadataNewClusterState = Metadata.builder()
.put(

@@ -178,20 +182,20 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase {
.build();
}

-private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) {
+private DiscoveryNodes.Builder generateDiscoveryNodes(boolean clusterManagerEligible) {
Set<DiscoveryNodeRole> dataOnlyRoles = Collections.singleton(DiscoveryNodeRole.DATA_ROLE);
return DiscoveryNodes.builder()
-.add(newNode("node1", masterEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles))
-.add(newNode("master_node", CLUSTER_MANAGER_DATA_ROLES))
+.add(newNode("node1", clusterManagerEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles))
+.add(newNode("cluster_manager_node", CLUSTER_MANAGER_DATA_ROLES))
.localNodeId("node1")
-.masterNodeId(masterEligible ? "node1" : "master_node");
+.masterNodeId(clusterManagerEligible ? "node1" : "cluster_manager_node");
}

private IndexMetadata createIndexMetadata(String name) {
return IndexMetadata.builder(name).settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2).build();
}

-public void testGetRelevantIndicesWithUnassignedShardsOnMasterEligibleNode() {
+public void testGetRelevantIndicesWithUnassignedShardsOnClusterManagerEligibleNode() {
IndexMetadata indexMetadata = createIndexMetadata("test");
Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(clusterStateWithUnassignedIndex(indexMetadata, true));
assertThat(indices.size(), equalTo(0));

@@ -205,8 +209,10 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase {

public void testGetRelevantIndicesWithAssignedShards() {
IndexMetadata indexMetadata = createIndexMetadata("test");
-boolean masterEligible = randomBoolean();
-Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(clusterStateWithAssignedIndex(indexMetadata, masterEligible));
+boolean clusterManagerEligible = randomBoolean();
+Set<Index> indices = IncrementalClusterStateWriter.getRelevantIndices(
+clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible)
+);
assertThat(indices.size(), equalTo(1));
}

@@ -160,7 +160,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
// now insert an unknown active/insync id , the checkpoint shouldn't change but a refresh should be requested.
final AllocationId extraId = AllocationId.newInitializing();

-// first check that adding it without the master blessing doesn't change anything.
+// first check that adding it without the cluster-manager blessing doesn't change anything.
updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4));
assertNull(tracker.checkpoints.get(extraId.getId()));
expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId()));

@@ -292,7 +292,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
assertThat(updatedGlobalCheckpoint.get(), not(equalTo(UNASSIGNED_SEQ_NO)));
}

-public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() {
+public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManager() {
final Map<AllocationId, Long> active = randomAllocationsWithLocalCheckpoints(1, 5);
final Map<AllocationId, Long> initializing = randomAllocationsWithLocalCheckpoints(1, 5);
final Map<AllocationId, Long> nonApproved = randomAllocationsWithLocalCheckpoints(1, 5);

@@ -313,7 +313,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
assertThat(tracker.getGlobalCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO)));
}

-public void testInSyncIdsAreRemovedIfNotValidatedByMaster() {
+public void testInSyncIdsAreRemovedIfNotValidatedByClusterManager() {
final long initialClusterStateVersion = randomNonNegativeLong();
final Map<AllocationId, Long> activeToStay = randomAllocationsWithLocalCheckpoints(1, 5);
final Map<AllocationId, Long> initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5);

@@ -421,7 +421,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
assertTrue(complete.get());
assertTrue(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId()).inSync);
} else {
-// master changes its mind and cancels the allocation
+// cluster-manager changes its mind and cancels the allocation
tracker.updateFromMaster(
clusterStateVersion + 1,
Collections.singleton(inSyncAllocationId.getId()),

@@ -492,7 +492,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
thread.join();
}

-public void testUpdateAllocationIdsFromMaster() throws Exception {
+public void testUpdateAllocationIdsFromClusterManager() throws Exception {
final long initialClusterStateVersion = randomNonNegativeLong();
final int numberOfActiveAllocationsIds = randomIntBetween(2, 16);
final int numberOfInitializingIds = randomIntBetween(2, 16);

@@ -645,7 +645,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync);

/*
-* The new in-sync allocation ID is in the in-sync set now yet the master does not know this; the allocation ID should still be in
+* The new in-sync allocation ID is in the in-sync set now yet the cluster-manager does not know this; the allocation ID should still be in
* the in-sync set even if we receive a cluster state update that does not reflect this.
*
*/

@@ -438,7 +438,7 @@ public class IndicesServiceTests extends OpenSearchSingleNodeTestCase {
final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
final ClusterState originalState = clusterService.state();

-// import an index with minor version incremented by one over cluster master version, it should be ignored
+// import an index with minor version incremented by one over the cluster-manager version, it should be ignored
final LocalAllocateDangledIndices dangling = getInstanceFromNode(LocalAllocateDangledIndices.class);
final Settings idxSettingsLater = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(Version.CURRENT.id + 10000))

@@ -470,7 +470,12 @@ public class ClusterStateChanges {
) {
return executeClusterStateUpdateTask(clusterState, () -> {
try {
-TransportMasterNodeActionUtils.runMasterOperation(masterNodeAction, request, clusterState, new PlainActionFuture<>());
+TransportMasterNodeActionUtils.runClusterManagerOperation(
+masterNodeAction,
+request,
+clusterState,
+new PlainActionFuture<>()
+);
} catch (Exception e) {
throw new RuntimeException(e);
}

@@ -134,7 +134,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
}
}

-// apply cluster state to nodes (incl. master)
+// apply cluster state to nodes (incl. cluster-manager)
for (DiscoveryNode node : state.nodes()) {
IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
ClusterState localState = adaptClusterStateToLocalNode(state, node);

@@ -328,7 +328,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
Supplier<MockIndicesService> indicesServiceSupplier
) {
List<DiscoveryNode> allNodes = new ArrayList<>();
-DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master
+DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager
allNodes.add(localNode);
// at least two nodes that have the data role so that we can allocate shards
allNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE));

@@ -368,20 +368,20 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
Supplier<MockIndicesService> indicesServiceSupplier
) {
-// randomly remove no_master blocks
+// randomly remove no_cluster_manager blocks
if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) {
state = ClusterState.builder(state)
.blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID))
.build();
}

-// randomly add no_master blocks
+// randomly add no_cluster_manager blocks
if (rarely() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID) == false) {
ClusterBlock block = randomBoolean() ? NoMasterBlockService.NO_MASTER_BLOCK_ALL : NoMasterBlockService.NO_MASTER_BLOCK_WRITES;
state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build();
}

-// if no_master block is in place, make no other cluster state changes
+// if no_cluster_manager block is in place, make no other cluster state changes
if (state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) {
return state;
}

@@ -481,7 +481,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
state = cluster.applyFailedShards(state, failedShards);
state = cluster.applyStartedShards(state, startedShards);

-// randomly add and remove nodes (except current master)
+// randomly add and remove nodes (except current cluster-manager)
if (rarely()) {
if (randomBoolean()) {
// add node

@@ -506,7 +506,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
}
}

-// TODO: go masterless?
+// TODO: go cluster-managerless?

return state;
}

|
|||
});
|
||||
}
|
||||
|
||||
public void testPeriodicRecheckOffMaster() {
|
||||
public void testPeriodicRecheckOffClusterManager() {
|
||||
ClusterState initialState = initialState();
|
||||
ClusterState.Builder builder = ClusterState.builder(initialState);
|
||||
PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(
|
||||
|
@ -528,20 +528,20 @@ public class PersistentTasksClusterServiceTests extends OpenSearchTestCase {
|
|||
assertThat(tasksInProgress.tasks().size(), equalTo(1));
|
||||
}
|
||||
|
||||
// The rechecker should recheck indefinitely on the master node as the
|
||||
// The rechecker should recheck indefinitely on the cluster-manager node as the
|
||||
// task can never be assigned while nonClusterStateCondition = false
|
||||
assertTrue(service.getPeriodicRechecker().isScheduled());
|
||||
|
||||
// Now simulate the node ceasing to be the master
|
||||
// Now simulate the node ceasing to be the cluster-manager
|
||||
builder = ClusterState.builder(clusterState);
|
||||
nodes = DiscoveryNodes.builder(clusterState.nodes());
|
||||
nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "a_new_master_node"));
|
||||
nodes.masterNodeId("a_new_master_node");
|
||||
nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "a_new_cluster_manager_node"));
|
||||
nodes.masterNodeId("a_new_cluster_manager_node");
|
||||
ClusterState nonMasterClusterState = builder.nodes(nodes).build();
|
||||
event = new ClusterChangedEvent("test", nonMasterClusterState, clusterState);
|
||||
service.clusterChanged(event);
|
||||
|
||||
// The service should have cancelled the rechecker on learning it is no longer running on the master node
|
||||
// The service should have cancelled the rechecker on learning it is no longer running on the cluster-manager node
|
||||
assertFalse(service.getPeriodicRechecker().isScheduled());
|
||||
}
|
||||
|
||||
|
@ -796,7 +796,7 @@ public class PersistentTasksClusterServiceTests extends OpenSearchTestCase {
|
|||
}
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
// remove a node that doesn't have any tasks assigned to it and it's not the master node
|
||||
// remove a node that doesn't have any tasks assigned to it and it's not the cluster-manager node
|
||||
for (DiscoveryNode node : clusterState.nodes()) {
|
||||
if (hasTasksAssignedTo(tasks, node.getId()) == false && "this_node".equals(node.getId()) == false) {
|
||||
logger.info("removed unassigned node {}", node.getId());
|
||||
|
|
|
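The testPeriodicRecheckOffClusterManager hunk shows the standard unit-test trick for taking the local node off cluster-manager duty: publish a state whose DiscoveryNodes name a different elected node, then hand the resulting event to the service. A condensed sketch under the same assumptions; clusterState and service are the test's own variables, and the id other_node is ours:

import org.opensearch.cluster.ClusterChangedEvent;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.settings.Settings;

// Rebuild the node set so a different (hypothetical) node id is the elected cluster-manager.
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "other_node"));
nodes.masterNodeId("other_node"); // the public builder method keeps its legacy name
ClusterState offClusterManagerState = ClusterState.builder(clusterState).nodes(nodes).build();

// The service observes the change and should cancel its cluster-manager-only work.
service.clusterChanged(new ClusterChangedEvent("test", offClusterManagerState, clusterState));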
@ -54,11 +54,11 @@ public class RestNodesInfoActionTests extends OpenSearchTestCase {

public void testDuplicatedFiltersAreNotRemoved() {
Map<String, String> params = new HashMap<>();
params.put("nodeId", "_all,master:false,_all");
params.put("nodeId", "_all,cluster_manager:false,_all");

RestRequest restRequest = buildRestRequest(params);
NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest);
assertArrayEquals(new String[] { "_all", "master:false", "_all" }, actual.nodesIds());
assertArrayEquals(new String[] { "_all", "cluster_manager:false", "_all" }, actual.nodesIds());
}

public void testOnlyMetrics() {
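Node filters are plain strings resolved server-side, so this test only swaps the literal it feeds in; while the master role is deprecated rather than removed, both spellings remain accepted as filters. Illustrative requests, with paths as documented for the Nodes Info and Cluster Stats APIs:

GET /_nodes/cluster_manager:false        # new role filter
GET /_nodes/master:false                 # deprecated spelling, still accepted
GET /_cluster/stats/nodes/master:true    # the case covered by the new testDeprecatedMasterNodeFilter()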
@ -290,7 +290,7 @@ public class InternalSnapshotsInfoServiceTests extends OpenSearchTestCase {
assertThat("Expecting all snapshot shard size fetches to execute a Reroute", reroutes.get(), equalTo(maxShardsToCreate));
}

public void testNoLongerMaster() throws Exception {
public void testNoLongerClusterManager() throws Exception {
final InternalSnapshotsInfoService snapshotsInfoService = new InternalSnapshotsInfoService(
Settings.EMPTY,
clusterService,

@ -310,18 +310,18 @@ public class InternalSnapshotsInfoServiceTests extends OpenSearchTestCase {
final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final int nbShards = randomIntBetween(1, 5);
applyClusterState(
"restore-indices-when-master-" + indexName,
"restore-indices-when-cluster-manager-" + indexName,
clusterState -> addUnassignedShards(clusterState, indexName, nbShards)
);
}

applyClusterState("demote-current-master", this::demoteMasterNode);
applyClusterState("demote-current-cluster-manager", this::demoteClusterManagerNode);

for (int i = 0; i < randomIntBetween(1, 10); i++) {
final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final int nbShards = randomIntBetween(1, 5);
applyClusterState(
"restore-indices-when-no-longer-master-" + indexName,
"restore-indices-when-no-longer-cluster-manager-" + indexName,
clusterState -> addUnassignedShards(clusterState, indexName, nbShards)
);
}

@ -484,7 +484,7 @@ public class InternalSnapshotsInfoServiceTests extends OpenSearchTestCase {
.build();
}

private ClusterState demoteMasterNode(final ClusterState currentState) {
private ClusterState demoteClusterManagerNode(final ClusterState currentState) {
final DiscoveryNode node = new DiscoveryNode(
"other",
OpenSearchTestCase.buildNewFakeTransportAddress(),
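The last hunk cuts off inside demoteClusterManagerNode, but its shape is recognizable: build a replacement DiscoveryNode named "other" and republish the state so that node, not the local one, is the elected cluster-manager. A hypothetical reconstruction of such a helper; everything beyond the constructor arguments the hunk itself shows is our guess:

private ClusterState demoteClusterManagerNode(final ClusterState currentState) {
    // Hypothetical: a fresh cluster-manager-eligible node to take over the election.
    final DiscoveryNode node = new DiscoveryNode(
        "other",
        OpenSearchTestCase.buildNewFakeTransportAddress(),
        Collections.emptyMap(),
        Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
        Version.CURRENT
    );
    // Point the elected-cluster-manager id at the new node, demoting the local one.
    return ClusterState.builder(currentState)
        .nodes(DiscoveryNodes.builder(currentState.nodes()).add(node).masterNodeId(node.getId()))
        .build();
}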
@ -306,7 +306,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
blobStoreContext.forceConsistent();
}
BlobStoreTestUtil.assertConsistency(
(BlobStoreRepository) testClusterNodes.randomMasterNodeSafe().repositoriesService.repository("repo"),
(BlobStoreRepository) testClusterNodes.randomClusterManagerNodeSafe().repositoriesService.repository("repo"),
Runnable::run
);
} finally {

@ -323,7 +323,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final int shards = randomIntBetween(1, 10);
final int documents = randomIntBetween(0, 100);

final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);

@ -389,9 +389,9 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
assertNotNull(createSnapshotResponseListener.result());
assertNotNull(restoreSnapshotResponseListener.result());
assertTrue(documentCountVerified.get());
SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
assertThat(snapshotIds, hasSize(1));
@ -404,8 +404,8 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

public void testSnapshotWithNodeDisconnects() {
final int dataNodes = randomIntBetween(2, 10);
final int masterNodes = randomFrom(1, 3, 5);
setupTestCluster(masterNodes, dataNodes);
final int clusterManagerNodes = randomFrom(1, 3, 5);
setupTestCluster(clusterManagerNodes, dataNodes);

String repoName = "repo";
String snapshotName = "snapshot";

@ -422,7 +422,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
if (randomBoolean()) {
scheduleNow(() -> testClusterNodes.clearNetworkDisruptions());
}
testClusterNodes.randomMasterNodeSafe().client.admin()
testClusterNodes.randomClusterManagerNodeSafe().client.admin()
.cluster()
.prepareCreateSnapshot(repoName, snapshotName)
.setPartial(partial)

@ -435,12 +435,12 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
for (int i = 0; i < randomIntBetween(0, dataNodes); ++i) {
scheduleNow(this::disconnectOrRestartDataNode);
}
// Only disconnect master if we have more than a single master and can simulate a failover
final boolean disconnectedMaster = randomBoolean() && masterNodes > 1;
if (disconnectedMaster) {
scheduleNow(this::disconnectOrRestartMasterNode);
// Only disconnect cluster-manager if we have more than a single cluster-manager and can simulate a failover
final boolean disconnectedClusterManager = randomBoolean() && clusterManagerNodes > 1;
if (disconnectedClusterManager) {
scheduleNow(this::disconnectOrRestartClusterManagerNode);
}
if (disconnectedMaster || randomBoolean()) {
if (disconnectedClusterManager || randomBoolean()) {
scheduleSoon(() -> testClusterNodes.clearNetworkDisruptions());
} else if (randomBoolean()) {
scheduleNow(() -> testClusterNodes.clearNetworkDisruptions());
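A note on the guard above: the cluster-manager count is drawn from {1, 3, 5}, so when more than one exists, disconnecting a single one still leaves a quorum of voting nodes (2 of 3, or 4 of 5) and a new election can complete; with a single cluster-manager there is no failover to simulate, so the test leaves it connected.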
@ -456,22 +456,22 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
}
});

runUntil(() -> testClusterNodes.randomMasterNode().map(master -> {
runUntil(() -> testClusterNodes.randomClusterManagerNode().map(clusterManager -> {
if (snapshotNeverStarted.get()) {
return true;
}
final SnapshotsInProgress snapshotsInProgress = master.clusterService.state().custom(SnapshotsInProgress.TYPE);
final SnapshotsInProgress snapshotsInProgress = clusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE);
return snapshotsInProgress != null && snapshotsInProgress.entries().isEmpty();
}).orElse(false), TimeUnit.MINUTES.toMillis(1L));

clearDisruptionsAndAwaitSync();

final TestClusterNodes.TestClusterNode randomMaster = testClusterNodes.randomMasterNode()
.orElseThrow(() -> new AssertionError("expected to find at least one active master node"));
SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state()
final TestClusterNodes.TestClusterNode randomClusterManager = testClusterNodes.randomClusterManagerNode()
.orElseThrow(() -> new AssertionError("expected to find at least one active cluster-manager node"));
SnapshotsInProgress finalSnapshotsInProgress = randomClusterManager.clusterService.state()
.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY);
assertThat(finalSnapshotsInProgress.entries(), empty());
final Repository repository = randomMaster.repositoriesService.repository(repoName);
final Repository repository = randomClusterManager.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
if (snapshotNeverStarted.get()) {
assertThat(snapshotIds, empty());

@ -480,10 +480,10 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
}
}

public void testSnapshotDeleteWithMasterFailover() {
public void testSnapshotDeleteWithClusterManagerFailover() {
final int dataNodes = randomIntBetween(2, 10);
final int masterNodes = randomFrom(3, 5);
setupTestCluster(masterNodes, dataNodes);
final int clusterManagerNodes = randomFrom(3, 5);
setupTestCluster(clusterManagerNodes, dataNodes);

String repoName = "repo";
String snapshotName = "snapshot";
@ -494,7 +494,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
continueOrDie(
createRepoAndIndex(repoName, index, shards),
createIndexResponse -> testClusterNodes.randomMasterNodeSafe().client.admin()
createIndexResponse -> testClusterNodes.randomClusterManagerNodeSafe().client.admin()
.cluster()
.prepareCreateSnapshot(repoName, snapshotName)
.setWaitForCompletion(waitForSnapshot)

@ -503,7 +503,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

final AtomicBoolean snapshotDeleteResponded = new AtomicBoolean(false);
continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> {
scheduleNow(this::disconnectOrRestartMasterNode);
scheduleNow(this::disconnectOrRestartClusterManagerNode);
testClusterNodes.randomDataNodeSafe().client.admin()
.cluster()
.prepareDeleteSnapshot(repoName, snapshotName)

@ -511,10 +511,10 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
});

runUntil(
() -> testClusterNodes.randomMasterNode()
() -> testClusterNodes.randomClusterManagerNode()
.map(
master -> snapshotDeleteResponded.get()
&& master.clusterService.state()
clusterManager -> snapshotDeleteResponded.get()
&& clusterManager.clusterService.state()
.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY)
.getEntries()
.isEmpty()

@ -525,11 +525,11 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

clearDisruptionsAndAwaitSync();

final TestClusterNodes.TestClusterNode randomMaster = testClusterNodes.randomMasterNode()
.orElseThrow(() -> new AssertionError("expected to find at least one active master node"));
SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state().custom(SnapshotsInProgress.TYPE);
final TestClusterNodes.TestClusterNode randomClusterManager = testClusterNodes.randomClusterManagerNode()
.orElseThrow(() -> new AssertionError("expected to find at least one active cluster-manager node"));
SnapshotsInProgress finalSnapshotsInProgress = randomClusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertThat(finalSnapshotsInProgress.entries(), empty());
final Repository repository = randomMaster.repositoriesService.repository(repoName);
final Repository repository = randomClusterManager.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
assertThat(snapshotIds, hasSize(0));
}
@ -542,7 +542,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final String index = "test";
final int shards = randomIntBetween(1, 10);

TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);

@ -558,12 +558,12 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

final StepListener<AcknowledgedResponse> deleteSnapshotStepListener = new StepListener<>();

masterNode.clusterService.addListener(new ClusterStateListener() {
clusterManagerNode.clusterService.addListener(new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty() == false) {
client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener);
masterNode.clusterService.removeListener(this);
clusterManagerNode.clusterService.removeListener(this);
}
}
});

@ -587,9 +587,9 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

assertNotNull(createSnapshotResponseStepListener.result());
assertNotNull(createAnotherSnapshotResponseStepListener.result());
SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
assertThat(snapshotIds, hasSize(1));

@ -608,7 +608,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final String index = "test";
final int shards = randomIntBetween(1, 10);

TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);
@ -659,9 +659,9 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

deterministicTaskQueue.runAllRunnableTasks();

SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
// We end up with two snapshots no matter if the delete worked out or not
assertThat(snapshotIds, hasSize(2));

@ -683,7 +683,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final String index = "test";
final int shards = randomIntBetween(1, 10);

TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);

@ -722,9 +722,9 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

deterministicTaskQueue.runAllRunnableTasks();

SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
// No snapshots should be left in the repository
assertThat(snapshotIds, empty());

@ -738,7 +738,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final String index = "test";
final int shards = randomIntBetween(1, 10);

TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);
@ -812,7 +812,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
assertThat(deleteSnapshotStepListener.result().isAcknowledged(), is(true));
assertThat(restoreSnapshotResponseListener.result().getRestoreInfo().failedShards(), is(0));

final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
assertThat(snapshotIds, contains(createOtherSnapshotResponseStepListener.result().getSnapshotInfo().snapshotId()));

@ -850,7 +850,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
String snapshotName = "snapshot";
final String index = "test";

TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);

@ -859,7 +859,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

final SetOnce<Index> firstIndex = new SetOnce<>();
continueOrDie(createRepoAndIndex(repoName, index, 1), createIndexResponse -> {
firstIndex.set(masterNode.clusterService.state().metadata().index(index).getIndex());
firstIndex.set(clusterManagerNode.clusterService.state().metadata().index(index).getIndex());
// create a few more indices to make it more likely that the subsequent index delete operation happens before snapshot
// finalization
final GroupedActionListener<CreateIndexResponse> listener = new GroupedActionListener<>(createIndicesListener, indices);

@ -907,9 +907,9 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

deterministicTaskQueue.runAllRunnableTasks();

SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
final RepositoryData repositoryData = getRepositoryData(repository);
Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
assertThat(snapshotIds, hasSize(1));

@ -944,7 +944,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final String index = "test";
final int shards = randomIntBetween(1, 10);

TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);
@ -990,9 +990,10 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

deterministicTaskQueue.runAllRunnableTasks();

SnapshotDeletionsInProgress deletionsInProgress = masterNode.clusterService.state().custom(SnapshotDeletionsInProgress.TYPE);
SnapshotDeletionsInProgress deletionsInProgress = clusterManagerNode.clusterService.state()
.custom(SnapshotDeletionsInProgress.TYPE);
assertFalse(deletionsInProgress.hasDeletionsInProgress());
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
final RepositoryData repositoryData = getRepositoryData(repository);
Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
// We end up with no snapshots since at least one of the deletes worked out

@ -1003,12 +1004,12 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
}

/**
* Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently
* Simulates concurrent restarts of data and cluster-manager nodes as well as relocating a primary shard, while starting and subsequently
* deleting a snapshot.
*/
public void testSnapshotPrimaryRelocations() {
final int masterNodeCount = randomFrom(1, 3, 5);
setupTestCluster(masterNodeCount, randomIntBetween(2, 5));
final int clusterManagerNodeCount = randomFrom(1, 3, 5);
setupTestCluster(clusterManagerNodeCount, randomIntBetween(2, 5));

String repoName = "repo";
String snapshotName = "snapshot";

@ -1016,11 +1017,11 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

final int shards = randomIntBetween(1, 5);

final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);
final AtomicBoolean createdSnapshot = new AtomicBoolean();
final AdminClient masterAdminClient = masterNode.client.admin();
final AdminClient clusterManagerAdminClient = clusterManagerNode.client.admin();

final StepListener<ClusterStateResponse> clusterStateResponseStepListener = new StepListener<>();
@ -1038,15 +1039,15 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
@Override
public void run() {
final StepListener<ClusterStateResponse> updatedClusterStateResponseStepListener = new StepListener<>();
masterAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener);
clusterManagerAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener);
continueOrDie(updatedClusterStateResponseStepListener, updatedClusterState -> {
final ShardRouting shardRouting = updatedClusterState.getState()
.routingTable()
.shardRoutingTable(shardToRelocate.shardId())
.primaryShard();
if (shardRouting.unassigned() && shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.NODE_LEFT) {
if (masterNodeCount > 1) {
scheduleNow(() -> testClusterNodes.stopNode(masterNode));
if (clusterManagerNodeCount > 1) {
scheduleNow(() -> testClusterNodes.stopNode(clusterManagerNode));
}
testClusterNodes.randomDataNodeSafe().client.admin()
.cluster()

@ -1058,7 +1059,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
.deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName), noopListener());
}));
scheduleNow(
() -> testClusterNodes.randomMasterNodeSafe().client.admin()
() -> testClusterNodes.randomClusterManagerNodeSafe().client.admin()
.cluster()
.reroute(
new ClusterRerouteRequest().add(

@ -1080,11 +1081,11 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
});
});

runUntil(() -> testClusterNodes.randomMasterNode().map(master -> {
runUntil(() -> testClusterNodes.randomClusterManagerNode().map(clusterManager -> {
if (createdSnapshot.get() == false) {
return false;
}
return master.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty();
return clusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty();
}).orElse(false), TimeUnit.MINUTES.toMillis(1L));

clearDisruptionsAndAwaitSync();

@ -1096,7 +1097,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
.entries(),
empty()
);
final Repository repository = testClusterNodes.randomMasterNodeSafe().repositoriesService.repository(repoName);
final Repository repository = testClusterNodes.randomClusterManagerNodeSafe().repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0)));
}
@ -1110,7 +1111,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

final int shards = randomIntBetween(1, 10);
final int documents = randomIntBetween(2, 100);
TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);

@ -1171,7 +1172,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
"Documents were restored but the restored index mapping was older than some documents and misses some of their fields",
(int) hitCount,
lessThanOrEqualTo(
((Map<?, ?>) masterNode.clusterService.state()
((Map<?, ?>) clusterManagerNode.clusterService.state()
.metadata()
.index(restoredIndex)
.mapping()

@ -1186,9 +1187,9 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

assertNotNull(createSnapshotResponseStepListener.result());
assertNotNull(restoreSnapshotResponseStepListener.result());
SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
assertThat(snapshotIds, hasSize(1));

@ -1210,7 +1211,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
final int shards = randomIntBetween(1, 10);
final int documents = randomIntBetween(1, 100);

final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
testClusterNodes.nodes.values().iterator().next().clusterService.state()
);

@ -1253,9 +1254,9 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
});

runUntil(() -> doneIndexing.get() && doneSnapshotting.get(), TimeUnit.MINUTES.toMillis(5L));
SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
final Repository repository = masterNode.repositoriesService.repository(repoName);
final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
assertThat(snapshotIds, hasSize(snapshotNames.size()));
@ -1314,12 +1315,12 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
}
}

private void disconnectOrRestartMasterNode() {
testClusterNodes.randomMasterNode().ifPresent(masterNode -> {
private void disconnectOrRestartClusterManagerNode() {
testClusterNodes.randomClusterManagerNode().ifPresent(clusterManagerNode -> {
if (randomBoolean()) {
testClusterNodes.disconnectNode(masterNode);
testClusterNodes.disconnectNode(clusterManagerNode);
} else {
masterNode.restart();
clusterManagerNode.restart();
}
});
}

@ -1374,12 +1375,15 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
.stream()
.map(node -> node.clusterService.state())
.collect(Collectors.toList());
final Set<String> masterNodeIds = clusterStates.stream()
final Set<String> clusterManagerNodeIds = clusterStates.stream()
.map(clusterState -> clusterState.nodes().getMasterNodeId())
.collect(Collectors.toSet());
final Set<Long> terms = clusterStates.stream().map(ClusterState::term).collect(Collectors.toSet());
final List<Long> versions = clusterStates.stream().map(ClusterState::version).distinct().collect(Collectors.toList());
return versions.size() == 1 && masterNodeIds.size() == 1 && masterNodeIds.contains(null) == false && terms.size() == 1;
return versions.size() == 1
&& clusterManagerNodeIds.size() == 1
&& clusterManagerNodeIds.contains(null) == false
&& terms.size() == 1;
}, TimeUnit.MINUTES.toMillis(1L));
}
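Spelled out, the reformatted return condition says the cluster counts as synced only when every node reports the same cluster state version, all nodes agree on a single elected cluster-manager id, that id is non-null (an election has actually completed), and all nodes share the same term.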
@ -1395,8 +1399,8 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
fail("Condition wasn't fulfilled.");
}

private void setupTestCluster(int masterNodes, int dataNodes) {
testClusterNodes = new TestClusterNodes(masterNodes, dataNodes);
private void setupTestCluster(int clusterManagerNodes, int dataNodes) {
testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes);
startCluster();
}

@ -1472,11 +1476,11 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
*/
private final Set<String> disconnectedNodes = new HashSet<>();

TestClusterNodes(int masterNodes, int dataNodes) {
for (int i = 0; i < masterNodes; ++i) {
TestClusterNodes(int clusterManagerNodes, int dataNodes) {
for (int i = 0; i < clusterManagerNodes; ++i) {
nodes.computeIfAbsent("node" + i, nodeName -> {
try {
return newMasterNode(nodeName);
return newClusterManagerNode(nodeName);
} catch (IOException e) {
throw new AssertionError(e);
}

@ -1501,7 +1505,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
.orElseThrow(() -> new AssertionError("Could not find node by id [" + nodeId + ']'));
}

private TestClusterNode newMasterNode(String nodeName) throws IOException {
private TestClusterNode newClusterManagerNode(String nodeName) throws IOException {
return newNode(nodeName, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
}

@ -1522,19 +1526,21 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
);
}

public TestClusterNode randomMasterNodeSafe() {
return randomMasterNode().orElseThrow(() -> new AssertionError("Expected to find at least one connected master node"));
public TestClusterNode randomClusterManagerNodeSafe() {
return randomClusterManagerNode().orElseThrow(
() -> new AssertionError("Expected to find at least one connected cluster-manager node")
);
}

public Optional<TestClusterNode> randomMasterNode() {
public Optional<TestClusterNode> randomClusterManagerNode() {
// Select from sorted list of data-nodes here to not have deterministic behaviour
final List<TestClusterNode> masterNodes = testClusterNodes.nodes.values()
final List<TestClusterNode> clusterManagerNodes = testClusterNodes.nodes.values()
.stream()
.filter(n -> n.node.isMasterNode())
.filter(n -> disconnectedNodes.contains(n.node.getName()) == false)
.sorted(Comparator.comparing(n -> n.node.getName()))
.collect(Collectors.toList());
return masterNodes.isEmpty() ? Optional.empty() : Optional.of(randomFrom(masterNodes));
return clusterManagerNodes.isEmpty() ? Optional.empty() : Optional.of(randomFrom(clusterManagerNodes));
}

public void stopNode(TestClusterNode node) {
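One line in this hunk is deliberately untouched: the filter .filter(n -> n.node.isMasterNode()). DiscoveryNode.isMasterNode(), like the DiscoveryNodes.getMasterNodeId() call in the sync check earlier, is public API; the commit confines the rename to comments, locals, and private members, which is how it keeps the promise that backwards compatibility is not impacted.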
@ -1596,15 +1602,15 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
}

/**
* Returns the {@link TestClusterNode} for the master node in the given {@link ClusterState}.
* Returns the {@link TestClusterNode} for the cluster-manager node in the given {@link ClusterState}.
* @param state ClusterState
* @return Master Node
* @return Cluster Manager Node
*/
public TestClusterNode currentMaster(ClusterState state) {
TestClusterNode master = nodes.get(state.nodes().getMasterNode().getName());
assertNotNull(master);
assertTrue(master.node.isMasterNode());
return master;
public TestClusterNode currentClusterManager(ClusterState state) {
TestClusterNode clusterManager = nodes.get(state.nodes().getMasterNode().getName());
assertNotNull(clusterManager);
assertTrue(clusterManager.node.isMasterNode());
return clusterManager;
}

private final class TestClusterNode {

@ -1636,7 +1642,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {

private final DiscoveryNode node;

private final MasterService masterService;
private final MasterService clusterManagerService;

private final AllocationService allocationService;

@ -1656,13 +1662,18 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
this.node = node;
final Environment environment = createEnvironment(node.getName());
threadPool = deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable));
masterService = new FakeThreadPoolMasterService(node.getName(), "test", threadPool, deterministicTaskQueue::scheduleNow);
clusterManagerService = new FakeThreadPoolMasterService(
node.getName(),
"test",
threadPool,
deterministicTaskQueue::scheduleNow
);
final Settings settings = environment.settings();
final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
clusterService = new ClusterService(
settings,
clusterSettings,
masterService,
clusterManagerService,
new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) {
@Override
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {

@ -2192,7 +2203,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
transportService,
namedWriteableRegistry,
allocationService,
masterService,
clusterManagerService,
() -> persistedState,
hostsResolver -> nodes.values()
.stream()

@ -2206,7 +2217,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
ElectionStrategy.DEFAULT_INSTANCE,
() -> new StatusInfo(HEALTHY, "healthy-info")
);
masterService.setClusterStatePublisher(coordinator);
clusterManagerService.setClusterStatePublisher(coordinator);
coordinator.start();
clusterService.getClusterApplierService().setNodeConnectionsService(nodeConnectionsService);
nodeConnectionsService.start();
@ -371,11 +371,11 @@ public class SnapshotsServiceTests extends OpenSearchTestCase {
final String indexName1 = "index-1";
final IndexId indexId1 = indexId(indexName1);
final RepositoryShardId shardId1 = new RepositoryShardId(indexId1, 0);
final String masterNodeId = uuid();
final String clusterManagerNodeId = uuid();
final SnapshotsInProgress.Entry cloneSingleShard = cloneEntry(
targetSnapshot,
sourceSnapshot.getSnapshotId(),
clonesMap(shardId1, initShardStatus(masterNodeId))
clonesMap(shardId1, initShardStatus(clusterManagerNodeId))
);

final Snapshot queuedTargetSnapshot = snapshot(repoName, "test-snapshot");

@ -388,11 +388,11 @@ public class SnapshotsServiceTests extends OpenSearchTestCase {
assertThat(cloneSingleShard.state(), is(SnapshotsInProgress.State.STARTED));

final ClusterState stateWithUnassignedRoutingShard = stateWithSnapshots(
ClusterState.builder(ClusterState.EMPTY_STATE).nodes(discoveryNodes(masterNodeId)).build(),
ClusterState.builder(ClusterState.EMPTY_STATE).nodes(discoveryNodes(clusterManagerNodeId)).build(),
cloneSingleShard,
queuedClone
);
final SnapshotsService.ShardSnapshotUpdate completeShardClone = successUpdate(targetSnapshot, shardId1, masterNodeId);
final SnapshotsService.ShardSnapshotUpdate completeShardClone = successUpdate(targetSnapshot, shardId1, clusterManagerNodeId);

final ClusterState updatedClusterState = applyUpdates(stateWithUnassignedRoutingShard, completeShardClone);
final SnapshotsInProgress snapshotsInProgress = updatedClusterState.custom(SnapshotsInProgress.TYPE);
@ -373,7 +373,7 @@ public class MockEventuallyConsistentRepository extends BlobStoreRepository {
new BytesArray(data)
);
// If the existing snapshotInfo differs only in the timestamps it stores, then the overwrite is not
// a problem and could be the result of a correctly handled master failover.
// a problem and could be the result of a correctly handled cluster-manager failover.
final SnapshotInfo existingInfo = SNAPSHOT_FORMAT.deserialize(
blobName,
namedXContentRegistry,
@ -553,11 +553,11 @@ public class RemoteClusterServiceTests extends OpenSearchTestCase {
final Settings settings = Settings.EMPTY;
final List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
final Settings data = nonMasterNode();
final Settings dedicatedMaster = clusterManagerOnlyNode();
final Settings dedicatedClusterManager = clusterManagerOnlyNode();
try (
MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedClusterManager);
MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data);
MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedClusterManager);
MockTransportService c2N2 = startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, data)
) {
final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode();
@ -743,24 +743,24 @@ public class SniffConnectionStrategyTests extends OpenSearchTestCase {
assertTrue(nodePredicate.test(all));
}
{
DiscoveryNode dataMaster = new DiscoveryNode(
DiscoveryNode dataClusterManager = new DiscoveryNode(
"id",
address,
Collections.emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)),
Version.CURRENT
);
assertTrue(nodePredicate.test(dataMaster));
assertTrue(nodePredicate.test(dataClusterManager));
}
{
DiscoveryNode dedicatedMaster = new DiscoveryNode(
DiscoveryNode dedicatedClusterManager = new DiscoveryNode(
"id",
address,
Collections.emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)),
Version.CURRENT
);
assertFalse(nodePredicate.test(dedicatedMaster));
assertFalse(nodePredicate.test(dedicatedClusterManager));
}
{
DiscoveryNode dedicatedIngest = new DiscoveryNode(

@ -773,14 +773,14 @@ public class SniffConnectionStrategyTests extends OpenSearchTestCase {
assertTrue(nodePredicate.test(dedicatedIngest));
}
{
DiscoveryNode masterIngest = new DiscoveryNode(
DiscoveryNode clusterManagerIngest = new DiscoveryNode(
"id",
address,
Collections.emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNodeRole.INGEST_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)),
Version.CURRENT
);
assertTrue(nodePredicate.test(masterIngest));
assertTrue(nodePredicate.test(clusterManagerIngest));
}
{
DiscoveryNode dedicatedData = new DiscoveryNode(

@ -855,14 +855,14 @@ public class SniffConnectionStrategyTests extends OpenSearchTestCase {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build();
Predicate<DiscoveryNode> nodePredicate = SniffConnectionStrategy.getNodePredicate(settings);
Set<DiscoveryNodeRole> dedicatedMasterRoles = new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE));
Set<DiscoveryNodeRole> dedicatedClusterManagerRoles = new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE));
Set<DiscoveryNodeRole> allRoles = DiscoveryNodeRole.BUILT_IN_ROLES;
{
DiscoveryNode node = new DiscoveryNode(
"id",
address,
Collections.singletonMap("gateway", "true"),
dedicatedMasterRoles,
dedicatedClusterManagerRoles,
Version.CURRENT
);
assertFalse(nodePredicate.test(node));

@ -872,7 +872,7 @@ public class SniffConnectionStrategyTests extends OpenSearchTestCase {
"id",
address,
Collections.singletonMap("gateway", "false"),
dedicatedMasterRoles,
dedicatedClusterManagerRoles,
Version.CURRENT
);
assertFalse(nodePredicate.test(node));

@ -882,7 +882,7 @@ public class SniffConnectionStrategyTests extends OpenSearchTestCase {
"id",
address,
Collections.singletonMap("gateway", "false"),
dedicatedMasterRoles,
dedicatedClusterManagerRoles,
Version.CURRENT
);
assertFalse(nodePredicate.test(node));
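Taken together, these assertions pin down the sniffing rule the renamed variables describe: a remote node is used as a gateway only if it carries at least one role besides cluster-manager, and, when cluster.remote.node.attr is set, only if the matching attribute is true. A paraphrase of the role part of that rule as a predicate, ours rather than SniffConnectionStrategy's actual implementation:

import java.util.function.Predicate;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;

// Paraphrase of the behavior asserted above, not the library's code:
Predicate<DiscoveryNode> sniffEligible = node -> node.getRoles()
    .stream()
    .anyMatch(role -> role.equals(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE) == false);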