Replaced "master" terminology in log messages (#2575)
Changed the log messages from "master" to "cluster-manager".

Signed-off-by: Owais Kazi <owaiskazi19@gmail.com>
parent b6ca0d1f78
commit cc0e66b1dc
@@ -176,7 +176,7 @@ public class Zen2RestApiIT extends OpenSearchNetty4IntegTestCase {
             assertThat(e.getResponse().getStatusLine().getStatusCode(), is(400));
             assertThat(
                 e.getMessage(),
-                Matchers.containsString("add voting config exclusions request for [invalid] matched no master-eligible nodes")
+                Matchers.containsString("add voting config exclusions request for [invalid] matched no cluster-manager-eligible nodes")
             );
         }
     }

@@ -287,7 +287,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
         internalCluster().setBootstrapMasterNodeIndex(2);
         List<String> masterNodes = new ArrayList<>();

-        logger.info("--> start 1st master-eligible node");
+        logger.info("--> start 1st cluster-manager-eligible node");
         masterNodes.add(
             internalCluster().startMasterOnlyNode(
                 Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()
@@ -299,7 +299,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
                 Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()
             ); // node ordinal 1

-        logger.info("--> start 2nd and 3rd master-eligible nodes and bootstrap");
+        logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and bootstrap");
         masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3

         logger.info("--> wait for all nodes to join the cluster");
@@ -335,19 +335,19 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });

-        logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held");
+        logger.info("--> try to unsafely bootstrap 1st cluster-manager-eligible node, while node lock is held");
         Environment environmentMaster1 = TestEnvironment.newEnvironment(
             Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build()
         );
         expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);

-        logger.info("--> stop 1st master-eligible node and data-only node");
+        logger.info("--> stop 1st cluster-manager-eligible node and data-only node");
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0)));
         assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten());
         internalCluster().stopRandomDataNode();

-        logger.info("--> unsafely-bootstrap 1st master-eligible node");
+        logger.info("--> unsafely-bootstrap 1st cluster-manager-eligible node");
         MockTerminal terminal = unsafeBootstrap(environmentMaster1, false, true);
         Metadata metadata = OpenSearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, nodeEnvironment.nodeDataPaths())
             .loadBestOnDiskState().metadata;
@@ -363,7 +363,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
             )
         );

-        logger.info("--> start 1st master-eligible node");
+        logger.info("--> start 1st cluster-manager-eligible node");
         String masterNode2 = internalCluster().startMasterOnlyNode(master1DataPathSettings);

         logger.info("--> detach-cluster on data-only node");
@@ -399,7 +399,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
         IndexMetadata indexMetadata = clusterService().state().metadata().index("test");
         assertThat(indexMetadata.getSettings().get(IndexMetadata.SETTING_HISTORY_UUID), notNullValue());

-        logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes");
+        logger.info("--> detach-cluster on 2nd and 3rd cluster-manager-eligible nodes");
         Environment environmentMaster2 = TestEnvironment.newEnvironment(
             Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build()
         );
@@ -409,7 +409,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
         );
         detachCluster(environmentMaster3, false);

-        logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster");
+        logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and ensure 4 nodes stable cluster");
         bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master2DataPathSettings));
         bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master3DataPathSettings));
         ensureStableCluster(4);
@@ -422,7 +422,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {

         Settings settings = Settings.builder().put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), true).build();

-        logger.info("--> start mixed data and master-eligible node and bootstrap cluster");
+        logger.info("--> start mixed data and cluster-manager-eligible node and bootstrap cluster");
         String masterNode = internalCluster().startNode(settings); // node ordinal 0

         logger.info("--> start data-only node and ensure 2 nodes stable cluster");
@@ -457,7 +457,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase {
         );
         detachCluster(environment, false);

-        logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form");
+        logger.info("--> stop cluster-manager-eligible node, clear its data and start it again - new cluster should form");
         internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() {
             @Override
             public boolean clearData(String nodeName) {

@@ -482,8 +482,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             try {
                 assertAcked(deleteSnapshotResponseFuture.actionGet());
             } catch (SnapshotMissingException ex) {
-                // When master node is closed during this test, it sometime manages to delete the snapshot files before
-                // completely stopping. In this case the retried delete snapshot operation on the new master can fail
+                // When cluster-manager node is closed during this test, it sometime manages to delete the snapshot files before
+                // completely stopping. In this case the retried delete snapshot operation on the new cluster-manager can fail
                 // with SnapshotMissingException
             }

@@ -759,7 +759,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         logger.info("--> start first node");
         internalCluster().startNode();
         logger.info("--> start second node");
-        // Make sure the first node is elected as master
+        // Make sure the first node is elected as cluster-manager
         internalCluster().startNode(nonMasterNode());
         // Register mock repositories
         for (int i = 0; i < 5; i++) {
@@ -836,7 +836,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     }

     public void testMasterShutdownDuringSnapshot() throws Exception {
-        logger.info("--> starting two master nodes and two data nodes");
+        logger.info("--> starting two cluster-manager nodes and two data nodes");
         internalCluster().startMasterOnlyNodes(2);
         internalCluster().startDataOnlyNodes(2);

@@ -859,7 +859,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             .setIndices("test-idx")
             .get();

-        logger.info("--> stopping master node");
+        logger.info("--> stopping cluster-manager node");
         internalCluster().stopCurrentMasterNode();

         logger.info("--> wait until the snapshot is done");
@@ -874,7 +874,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     }

     public void testMasterAndDataShutdownDuringSnapshot() throws Exception {
-        logger.info("--> starting three master nodes and two data nodes");
+        logger.info("--> starting three cluster-manager nodes and two data nodes");
         internalCluster().startMasterOnlyNodes(3);
         internalCluster().startDataOnlyNodes(2);

@@ -902,7 +902,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest

         logger.info("--> stopping data node {}", dataNode);
         stopNode(dataNode);
-        logger.info("--> stopping master node {} ", masterNode);
+        logger.info("--> stopping cluster-manager node {} ", masterNode);
         internalCluster().stopCurrentMasterNode();

         logger.info("--> wait until the snapshot is done");
@@ -925,7 +925,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
      * the cluster.
      */
     public void testRestoreShrinkIndex() throws Exception {
-        logger.info("--> starting a master node and a data node");
+        logger.info("--> starting a cluster-manager node and a data node");
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNode();

@@ -1144,7 +1144,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     }

     public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception {
-        logger.info("--> starting a master node and two data nodes");
+        logger.info("--> starting a cluster-manager node and two data nodes");
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNodes(2);
         final Path repoPath = randomRepoPath();
@@ -1200,7 +1200,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     }

     public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception {
-        logger.info("--> starting a master node and two data nodes");
+        logger.info("--> starting a cluster-manager node and two data nodes");
         internalCluster().startMasterOnlyNode();
         final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
         final Path repoPath = randomRepoPath();

@@ -54,7 +54,7 @@ import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;

 /**
- * A request to add voting config exclusions for certain master-eligible nodes, and wait for these nodes to be removed from the voting
+ * A request to add voting config exclusions for certain cluster-manager-eligible nodes, and wait for these nodes to be removed from the voting
  * configuration.
  */
 public class AddVotingConfigExclusionsRequest extends MasterNodeRequest<AddVotingConfigExclusionsRequest> {
@@ -66,7 +66,7 @@ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest<AddVotin
     private final TimeValue timeout;

     /**
-     * Construct a request to add voting config exclusions for master-eligible nodes matching the given node names, and wait for a
+     * Construct a request to add voting config exclusions for cluster-manager-eligible nodes matching the given node names, and wait for a
      * default 30 seconds for these exclusions to take effect, removing the nodes from the voting configuration.
      * @param nodeNames Names of the nodes to add - see {@link AddVotingConfigExclusionsRequest#resolveVotingConfigExclusions(ClusterState)}
      */
@@ -75,7 +75,7 @@ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest<AddVotin
     }

     /**
-     * Construct a request to add voting config exclusions for master-eligible nodes matching the given descriptions, and wait for these
+     * Construct a request to add voting config exclusions for cluster-manager-eligible nodes matching the given descriptions, and wait for these
      * nodes to be removed from the voting configuration.
      * @param nodeDescriptions Descriptions of the nodes whose exclusions to add - see {@link DiscoveryNodes#resolveNodes(String...)}.
      * @param nodeIds Ids of the nodes whose exclusions to add - see
@@ -136,7 +136,9 @@ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest<AddVotin

             if (newVotingConfigExclusions.isEmpty()) {
                 throw new IllegalArgumentException(
-                    "add voting config exclusions request for " + Arrays.asList(nodeDescriptions) + " matched no master-eligible nodes"
+                    "add voting config exclusions request for "
+                        + Arrays.asList(nodeDescriptions)
+                        + " matched no cluster-manager-eligible nodes"
                 );
             }
         } else if (nodeIds.length >= 1) {

@@ -77,7 +77,7 @@ import java.util.function.Consumer;
  * InternalClusterInfoService provides the ClusterInfoService interface,
  * routinely updated on a timer. The timer can be dynamically changed by
  * setting the <code>cluster.info.update.interval</code> setting (defaulting
- * to 30 seconds). The InternalClusterInfoService only runs on the master node.
+ * to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node.
  * Listens for changes in the number of data nodes and immediately submits a
  * ClusterInfoUpdateJob if a node has been added.
  *
@@ -109,7 +109,7 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt
     private volatile ImmutableOpenMap<String, DiskUsage> leastAvailableSpaceUsages;
     private volatile ImmutableOpenMap<String, DiskUsage> mostAvailableSpaceUsages;
     private volatile IndicesStatsSummary indicesStatsSummary;
-    // null if this node is not currently the master
+    // null if this node is not currently the cluster-manager
     private final AtomicReference<RefreshAndRescheduleRunnable> refreshAndRescheduleRunnable = new AtomicReference<>();
     private volatile boolean enabled;
     private volatile TimeValue fetchTimeout;
@@ -150,8 +150,8 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt
     @Override
     public void clusterChanged(ClusterChangedEvent event) {
         if (event.localNodeMaster() && refreshAndRescheduleRunnable.get() == null) {
-            logger.trace("elected as master, scheduling cluster info update tasks");
-            executeRefresh(event.state(), "became master");
+            logger.trace("elected as cluster-manager, scheduling cluster info update tasks");
+            executeRefresh(event.state(), "became cluster-manager");

             final RefreshAndRescheduleRunnable newRunnable = new RefreshAndRescheduleRunnable();
             refreshAndRescheduleRunnable.set(newRunnable);
@@ -535,7 +535,7 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt
             if (this == refreshAndRescheduleRunnable.get()) {
                 super.doRun();
             } else {
-                logger.trace("master changed, scheduled refresh job is stale");
+                logger.trace("cluster-manager changed, scheduled refresh job is stale");
             }
         }

@@ -135,7 +135,7 @@ public class ClusterBootstrapService {
                     + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()
                     + "] set to ["
                     + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE
-                    + "] must be master-eligible"
+                    + "] must be cluster-manager-eligible"
             );
         }
         bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings));
@@ -219,7 +219,7 @@ public class ClusterBootstrapService {

         logger.info(
             "no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] "
-                + "unless existing master is discovered",
+                + "unless existing cluster-manager is discovered",
             unconfiguredBootstrapTimeout
         );

@@ -192,7 +192,7 @@ public class ClusterFormationFailureHelper {
         );

         if (clusterState.nodes().getLocalNode().isMasterNode() == false) {
-            return String.format(Locale.ROOT, "master not discovered yet: %s", discoveryStateIgnoringQuorum);
+            return String.format(Locale.ROOT, "cluster-manager not discovered yet: %s", discoveryStateIgnoringQuorum);
         }

         if (clusterState.getLastAcceptedConfiguration().isEmpty()) {
@@ -203,14 +203,14 @@ public class ClusterFormationFailureHelper {
             } else {
                 bootstrappingDescription = String.format(
                     Locale.ROOT,
-                    "this node must discover master-eligible nodes %s to bootstrap a cluster",
+                    "this node must discover cluster-manager-eligible nodes %s to bootstrap a cluster",
                     INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(settings)
                 );
             }

             return String.format(
                 Locale.ROOT,
-                "master not discovered yet, this node has not previously joined a bootstrapped cluster, and %s: %s",
+                "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and %s: %s",
                 bootstrappingDescription,
                 discoveryStateIgnoringQuorum
             );
@@ -221,7 +221,7 @@ public class ClusterFormationFailureHelper {
         if (clusterState.getLastCommittedConfiguration().equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) {
             return String.format(
                 Locale.ROOT,
-                "master not discovered yet and this node was detached from its previous cluster, have discovered %s; %s",
+                "cluster-manager not discovered yet and this node was detached from its previous cluster, have discovered %s; %s",
                 foundPeers,
                 discoveryWillContinueDescription
             );
@@ -250,7 +250,7 @@ public class ClusterFormationFailureHelper {

         return String.format(
             Locale.ROOT,
-            "master not discovered or elected yet, an election requires %s, have discovered %s which %s; %s",
+            "cluster-manager not discovered or elected yet, an election requires %s, have discovered %s which %s; %s",
             quorumDescription,
             foundPeers,
             isQuorumOrNot,
@@ -269,8 +269,8 @@ public class ClusterFormationFailureHelper {

         if (nodeIds.size() == 1) {
             if (nodeIds.contains(GatewayMetaState.STALE_STATE_CONFIG_NODE_ID)) {
-                return "one or more nodes that have already participated as master-eligible nodes in the cluster but this node was "
-                    + "not master-eligible the last time it joined the cluster";
+                return "one or more nodes that have already participated as cluster-manager-eligible nodes in the cluster but this node was "
+                    + "not cluster-manager-eligible the last time it joined the cluster";
             } else {
                 return "a node with id " + realNodeIds;
             }

@@ -510,7 +510,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery
     private void abdicateTo(DiscoveryNode newMaster) {
         assert Thread.holdsLock(mutex);
         assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode;
-        assert newMaster.isMasterNode() : "should only abdicate to master-eligible node but was " + newMaster;
+        assert newMaster.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newMaster;
         final StartJoinRequest startJoinRequest = new StartJoinRequest(newMaster, Math.max(getCurrentTerm(), maxTermSeen) + 1);
         logger.info("abdicating to {} with term {}", newMaster, startJoinRequest.getTerm());
         getLastAcceptedState().nodes().mastersFirstStream().forEach(node -> {
@@ -563,7 +563,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery

     private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) {
         assert Thread.holdsLock(mutex) == false;
-        assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not master-eligible";
+        assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not cluster-manager-eligible";
         logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest);

         if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) {
@@ -683,7 +683,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery
     void becomeLeader(String method) {
         assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
         assert mode == Mode.CANDIDATE : "expected candidate but was " + mode;
-        assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not master-eligible";
+        assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not cluster-manager-eligible";

         logger.debug(
             "{}: coordinator becoming LEADER in term {} (was {}, lastKnownLeader was [{}])",
@@ -709,7 +709,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery

     void becomeFollower(String method, DiscoveryNode leaderNode) {
         assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
-        assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not master-eligible";
+        assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not cluster-manager-eligible";
         assert mode != Mode.LEADER : "do not switch to follower from leader (should be candidate first)";

         if (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) {
@@ -751,11 +751,11 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery
     }

     private void cleanMasterService() {
-        masterService.submitStateUpdateTask("clean-up after stepping down as master", new LocalClusterUpdateTask() {
+        masterService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() {
             @Override
             public void onFailure(String source, Exception e) {
                 // ignore
-                logger.trace("failed to clean-up after stepping down as master", e);
+                logger.trace("failed to clean-up after stepping down as cluster-manager", e);
             }

             @Override
@@ -987,9 +987,9 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery
         }

         if (getLocalNode().isMasterNode() == false) {
-            logger.debug("skip setting initial configuration as local node is not a master-eligible node");
+            logger.debug("skip setting initial configuration as local node is not a cluster-manager-eligible node");
             throw new CoordinationStateRejectedException(
-                "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"
+                "this node is not cluster-manager-eligible, but cluster bootstrapping can only happen on a cluster-manager-eligible node"
             );
         }

@@ -1046,8 +1046,10 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery

         // exclude any nodes whose ID is in the voting config exclusions list ...
         final Stream<String> excludedNodeIds = clusterState.getVotingConfigExclusions().stream().map(VotingConfigExclusion::getNodeId);
-        // ... and also automatically exclude the node IDs of master-ineligible nodes that were previously master-eligible and are still in
-        // the voting config. We could exclude all the master-ineligible nodes here, but there could be quite a few of them and that makes
+        // ... and also automatically exclude the node IDs of cluster-manager-ineligible nodes that were previously cluster-manager-eligible
+        // and are still in
+        // the voting config. We could exclude all the cluster-manager-ineligible nodes here, but there could be quite a few of them and
+        // that makes
         // the logging much harder to follow.
         final Stream<String> masterIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false)
             .filter(

@@ -47,9 +47,9 @@ public class DetachClusterCommand extends OpenSearchNodeCommand {
     static final String CONFIRMATION_MSG = DELIMITER
         + "\n"
         + "You should only run this tool if you have permanently lost all of the\n"
-        + "master-eligible nodes in this cluster and you cannot restore the cluster\n"
+        + "cluster-manager-eligible nodes in this cluster and you cannot restore the cluster\n"
         + "from a snapshot, or you have already unsafely bootstrapped a new cluster\n"
-        + "by running `opensearch-node unsafe-bootstrap` on a master-eligible\n"
+        + "by running `opensearch-node unsafe-bootstrap` on a cluster-manager-eligible\n"
         + "node that belonged to the same cluster as this node. This tool can cause\n"
         + "arbitrary data loss and its use should be your last resort.\n"
         + "\n"

@@ -489,7 +489,7 @@ public class JoinHelper {
             pendingAsTasks.put(task, new JoinTaskListener(task, value));
         });

-        final String stateUpdateSource = "elected-as-master ([" + pendingAsTasks.size() + "] nodes joined)";
+        final String stateUpdateSource = "elected-as-cluster-manager ([" + pendingAsTasks.size() + "] nodes joined)";

         pendingAsTasks.put(JoinTaskExecutor.newBecomeMasterTask(), (source, e) -> {});
         pendingAsTasks.put(JoinTaskExecutor.newFinishElectionTask(), (source, e) -> {});

@@ -57,14 +57,14 @@ public class Reconfigurator {
      * the best resilience it makes automatic adjustments to the voting configuration as master nodes join or leave the cluster. Adjustments
      * that fix or increase the size of the voting configuration are always a good idea, but the wisdom of reducing the voting configuration
      * size is less clear. For instance, automatically reducing the voting configuration down to a single node means the cluster requires
-     * this node to operate, which is not resilient: if it broke we could restore every other master-eligible node in the cluster to health
+     * this node to operate, which is not resilient: if it broke we could restore every other cluster-manager-eligible node in the cluster to health
      * and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a
      * five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the
      * loss of a further node before failing.
      *
      * We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we
      * require the user to control the voting configuration manually using the retirement API. The former, default, option, guarantees that
-     * as long as there have been at least three master-eligible nodes in the cluster and no more than one of them is currently unavailable,
+     * as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable,
      * then the cluster will still operate, which is what almost everyone wants. Manual control is for users who want different guarantees.
      */
     public static final Setting<Boolean> CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION = Setting.boolSetting(

@@ -60,15 +60,15 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand {
     static final String CONFIRMATION_MSG = DELIMITER
         + "\n"
         + "You should only run this tool if you have permanently lost half or more\n"
-        + "of the master-eligible nodes in this cluster, and you cannot restore the\n"
+        + "of the cluster-manager-eligible nodes in this cluster, and you cannot restore the\n"
         + "cluster from a snapshot. This tool can cause arbitrary data loss and its\n"
-        + "use should be your last resort. If you have multiple surviving master\n"
+        + "use should be your last resort. If you have multiple surviving cluster-manager\n"
         + "eligible nodes, you should run this tool on the node with the highest\n"
         + "cluster state (term, version) pair.\n"
         + "\n"
         + "Do you want to proceed?\n";

-    static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on master eligible node";
+    static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on cluster-manager eligible node";

     static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG =
         "last committed voting voting configuration is empty, cluster has never been bootstrapped?";
@@ -81,7 +81,9 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand {
     private OptionSpec<Boolean> applyClusterReadOnlyBlockOption;

     UnsafeBootstrapMasterCommand() {
-        super("Forces the successful election of the current node after the permanent loss of the half or more master-eligible nodes");
+        super(
+            "Forces the successful election of the current node after the permanent loss of the half or more cluster-manager-eligible nodes"
+        );
         applyClusterReadOnlyBlockOption = parser.accepts("apply-cluster-read-only-block", "Optional cluster.blocks.read_only setting")
             .withOptionalArg()
             .ofType(Boolean.class);

@@ -206,7 +206,7 @@ public abstract class DiscoveryNodeRole implements Comparable<DiscoveryNodeRole>
     };

     /**
-     * Represents the role for a master-eligible node.
+     * Represents the role for a cluster-manager-eligible node.
      * @deprecated As of 2.0, because promoting inclusive language, replaced by {@link #CLUSTER_MANAGER_ROLE}
      */
     @Deprecated

@@ -574,7 +574,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
         public String shortSummary() {
             final StringBuilder summary = new StringBuilder();
             if (masterNodeChanged()) {
-                summary.append("master node changed {previous [");
+                summary.append("cluster-manager node changed {previous [");
                 if (previousMasterNode() != null) {
                     summary.append(previousMasterNode());
                 }
@@ -799,7 +799,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
     }

     /**
-     * Check if the given name of the node role is 'cluster_manger' or 'master'.
+     * Check if the given name of the node role is 'cluster_manager' or 'master'.
      * The method is added for {@link #resolveNodes} to keep the code clear, when support the both above roles.
      * @deprecated As of 2.0, because promoting inclusive language. MASTER_ROLE is deprecated.
      * @param matchAttrName a given String for a name of the node role.

@@ -137,7 +137,9 @@ public class HandshakingTransportAddressConnector implements TransportAddressCon
                 if (remoteNode.equals(transportService.getLocalNode())) {
                     listener.onFailure(new ConnectTransportException(remoteNode, "local node found"));
                 } else if (remoteNode.isMasterNode() == false) {
-                    listener.onFailure(new ConnectTransportException(remoteNode, "non-master-eligible node found"));
+                    listener.onFailure(
+                        new ConnectTransportException(remoteNode, "non-cluster-manager-eligible node found")
+                    );
                 } else {
                     transportService.connectToNode(remoteNode, new ActionListener<Void>() {
                         @Override
@@ -153,7 +155,8 @@ public class HandshakingTransportAddressConnector implements TransportAddressCon
                         @Override
                         public void onFailure(Exception e) {
                             // we opened a connection and successfully performed a handshake, so we're definitely
-                            // talking to a master-eligible node with a matching cluster name and a good version,
+                            // talking to a cluster-manager-eligible node with a matching cluster name and a good
+                            // version,
                             // but the attempt to open a full connection to its publish address failed; a common
                             // reason is that the remote node is listening on 0.0.0.0 but has made an inappropriate
                             // choice for its publish address.

@@ -225,7 +225,7 @@ public abstract class PeerFinder {

     public interface TransportAddressConnector {
         /**
-         * Identify the node at the given address and, if it is a master node and not the local node then establish a full connection to it.
+         * Identify the node at the given address and, if it is a cluster-manager node and not the local node then establish a full connection to it.
          */
         void connectToRemoteMasterNode(TransportAddress transportAddress, ActionListener<DiscoveryNode> listener);
     }
@@ -275,7 +275,7 @@ public abstract class PeerFinder {
             return peersRemoved;
         }

-        logger.trace("probing master nodes from cluster state: {}", lastAcceptedNodes);
+        logger.trace("probing cluster-manager nodes from cluster state: {}", lastAcceptedNodes);
         for (ObjectCursor<DiscoveryNode> discoveryNodeObjectCursor : lastAcceptedNodes.getMasterNodes().values()) {
             startProbe(discoveryNodeObjectCursor.value.getAddress());
         }
@@ -381,7 +381,7 @@ public abstract class PeerFinder {
         transportAddressConnector.connectToRemoteMasterNode(transportAddress, new ActionListener<DiscoveryNode>() {
             @Override
             public void onResponse(DiscoveryNode remoteNode) {
-                assert remoteNode.isMasterNode() : remoteNode + " is not master-eligible";
+                assert remoteNode.isMasterNode() : remoteNode + " is not cluster-manager-eligible";
                 assert remoteNode.equals(getLocalNode()) == false : remoteNode + " is the local node";
                 synchronized (mutex) {
                     if (active == false) {

@@ -68,7 +68,7 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand {
     static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found";

     public NodeRepurposeCommand() {
-        super("Repurpose this node to another master/data role, cleaning up any excess persisted data");
+        super("Repurpose this node to another cluster-manager/data role, cleaning up any excess persisted data");
     }

     void testExecute(Terminal terminal, OptionSet options, Environment env) throws Exception {
@@ -129,7 +129,7 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand {
         terminal.println(noMasterMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetadataPaths.size()));
         outputHowToSeeVerboseInformation(terminal);

-        terminal.println("Node is being re-purposed as no-master and no-data. Clean-up of index data will be performed.");
+        terminal.println("Node is being re-purposed as no-cluster-manager and no-data. Clean-up of index data will be performed.");
         confirm(terminal, "Do you want to proceed?");

         removePaths(terminal, indexPaths); // clean-up shard dirs
@@ -137,7 +137,7 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand {
         MetadataStateFormat.deleteMetaState(dataPaths);
         IOUtils.rm(Stream.of(dataPaths).map(path -> path.resolve(INDICES_FOLDER)).toArray(Path[]::new));

-        terminal.println("Node successfully repurposed to no-master and no-data.");
+        terminal.println("Node successfully repurposed to no-cluster-manager and no-data.");
     }

     private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException {
@@ -162,12 +162,12 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand {
         terminal.println(shardMessage(shardDataPaths.size(), indexUUIDs.size()));
         outputHowToSeeVerboseInformation(terminal);

-        terminal.println("Node is being re-purposed as master and no-data. Clean-up of shard data will be performed.");
+        terminal.println("Node is being re-purposed as cluster-manager and no-data. Clean-up of shard data will be performed.");
         confirm(terminal, "Do you want to proceed?");

         removePaths(terminal, shardDataPaths); // clean-up shard dirs

-        terminal.println("Node successfully repurposed to master and no-data.");
+        terminal.println("Node successfully repurposed to cluster-manager and no-data.");
     }

     private ClusterState loadClusterState(Terminal terminal, Environment env, PersistedClusterStateService psf) throws IOException {

@@ -89,7 +89,7 @@ import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonTh
  *
  * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that
  * the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link
- * ClusterState#metadata()} because it might be stale or incomplete. Master-eligible nodes must perform an election to find a complete and
+ * ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and
  * non-stale state, and master-ineligible nodes receive the real cluster state from the elected master after joining the cluster.
  */
 public class GatewayMetaState implements Closeable {
@@ -97,7 +97,7 @@ public class GatewayMetaState implements Closeable {
     /**
      * Fake node ID for a voting configuration written by a master-ineligible data node to indicate that its on-disk state is potentially
      * stale (since it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is
-     * restarted as a master-eligible node then it does not win any elections until it has received a fresh cluster state.
+     * restarted as a cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state.
      */
     public static final String STALE_STATE_CONFIG_NODE_ID = "STALE_STATE_CONFIG";

@@ -310,7 +310,7 @@ public class GatewayMetaState implements Closeable {
         }

         try {
-            // Hack: This is to ensure that non-master-eligible Zen2 nodes always store a current term
+            // Hack: This is to ensure that non-cluster-manager-eligible Zen2 nodes always store a current term
             // that's higher than the last accepted term.
             // TODO: can we get rid of this hack?
             if (event.state().term() > incrementalClusterStateWriter.getPreviousManifest().getCurrentTerm()) {

@@ -333,7 +333,8 @@ public class IncrementalClusterStateWriter {
         } catch (WriteStateException e) {
             // If the Manifest write results in a dirty WriteStateException it's not safe to roll back, removing the new metadata files,
             // because if the Manifest was actually written to disk and its deletion fails it will reference these new metadata files.
-            // On master-eligible nodes a dirty WriteStateException here is fatal to the node since we no longer really have any idea
+            // On cluster-manager-eligible nodes a dirty WriteStateException here is fatal to the node since we no longer really have
+            // any idea
             // what the state on disk is and the only sensible response is to start again from scratch.
             if (e.isDirty() == false) {
                 rollback();

@@ -108,7 +108,7 @@ import java.util.function.LongSupplier;
  import java.util.function.Supplier;

  /**
- * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by master-eligible nodes
+ * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by cluster-manager-eligible nodes
  * to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any
  * documents that have not changed. The index has the following fields:
  *
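For readers unfamiliar with the approach described in this Javadoc, the sketch below shows the general idea of persisting a small metadata document into a bare Lucene index on a data path. It is a simplified illustration, not the actual PersistedClusterStateService schema; the field names ("type", "data") and the index path are made up for the example.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.FSDirectory;

import java.io.IOException;
import java.nio.file.Path;

class BareLuceneMetadataSketch {
    public static void main(String[] args) throws IOException {
        // Open (or create) a Lucene index directly on a data path.
        try (FSDirectory directory = FSDirectory.open(Path.of("/tmp/example-metadata-index"));
             IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig())) {
            // One document per piece of metadata: a keyword "type" field identifies it,
            // and the payload is stored verbatim so it can be read back on startup.
            Document doc = new Document();
            doc.add(new StringField("type", "global-metadata", Field.Store.YES));
            doc.add(new StoredField("data", "{\"cluster_uuid\":\"example\"}"));
            writer.addDocument(doc);
            writer.commit(); // make the write durable before acknowledging it
        }
    }
}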
@@ -398,7 +398,7 @@ public class PersistentTasksClusterService implements ClusterStateListener, Clos

  /**
  * Returns true if the cluster state change(s) require to reassign some persistent tasks. It can happen in the following
- * situations: a node left or is added, the routing table changed, the master node changed, the metadata changed or the
+ * situations: a node left or is added, the routing table changed, the cluster-manager node changed, the metadata changed or the
  * persistent tasks changed.
  */
  boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) {
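The condition spelled out in that Javadoc reduces to a simple disjunction. The snippet below is an illustrative paraphrase only, with made-up parameter names rather than the actual OpenSearch method or signature.

// Illustrative paraphrase of the Javadoc above (not the real OpenSearch API).
class ReassignmentCheckSketch {
    static boolean shouldReassign(boolean nodesChanged,
                                  boolean routingTableChanged,
                                  boolean clusterManagerChanged,
                                  boolean metadataChanged,
                                  boolean persistentTasksChanged) {
        // Any one of these cluster-state changes is enough to reconsider task assignments.
        return nodesChanged
            || routingTableChanged
            || clusterManagerChanged
            || metadataChanged
            || persistentTasksChanged;
    }
}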
@@ -38,7 +38,7 @@
  * any {@code BlobStoreRepository} implementation must provide via its implementation of
  * {@link org.opensearch.repositories.blobstore.BlobStoreRepository#getBlobContainer()}.</p>
  *
- * <p>The blob store is written to and read from by master-eligible nodes and data nodes. All metadata related to a snapshot's
+ * <p>The blob store is written to and read from by cluster-manager-eligible nodes and data nodes. All metadata related to a snapshot's
  * scope and health is written by the master node.</p>
  * <p>The data-nodes on the other hand, write the data for each individual shard but do not write any blobs outside of shard directories for
  * shards that they hold the primary of. For each shard, the data-node holding the shard's primary writes the actual data in form of
@@ -163,7 +163,7 @@ public class AddVotingConfigExclusionsRequestTests extends OpenSearchTestCase {
  IllegalArgumentException.class,
  () -> makeRequestWithNodeDescriptions("not-a-node").resolveVotingConfigExclusions(clusterState)
  ).getMessage(),
- equalTo("add voting config exclusions request for [not-a-node] matched no master-eligible nodes")
+ equalTo("add voting config exclusions request for [not-a-node] matched no cluster-manager-eligible nodes")
  );
  assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
  }
@@ -344,7 +344,7 @@ public class TransportAddVotingConfigExclusionsActionTests extends OpenSearchTes
  assertThat(rootCause, instanceOf(IllegalArgumentException.class));
  assertThat(
  rootCause.getMessage(),
- equalTo("add voting config exclusions request for [not-a-node] matched no master-eligible nodes")
+ equalTo("add voting config exclusions request for [not-a-node] matched no cluster-manager-eligible nodes")
  );
  assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
  }
@@ -368,7 +368,7 @@ public class TransportAddVotingConfigExclusionsActionTests extends OpenSearchTes
  assertThat(rootCause, instanceOf(IllegalArgumentException.class));
  assertThat(
  rootCause.getMessage(),
- equalTo("add voting config exclusions request for [_all, master:false] matched no master-eligible nodes")
+ equalTo("add voting config exclusions request for [_all, master:false] matched no cluster-manager-eligible nodes")
  );
  assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
  }
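The error string asserted in these tests is produced when the requested node filter matches no cluster-manager-eligible node. The following is a self-contained sketch of that resolution step with illustrative types and names, not the OpenSearch implementation.

import java.util.List;
import java.util.stream.Collectors;

class VotingExclusionResolutionSketch {
    // Illustrative node record: a name plus a flag for cluster-manager eligibility.
    record Node(String name, boolean clusterManagerEligible) {}

    // Resolve the nodes a voting-config-exclusions request applies to; reject the request
    // when the given description matches no cluster-manager-eligible node, mirroring the
    // message asserted in the tests above.
    static List<Node> resolve(List<Node> nodes, String description) {
        List<Node> matched = nodes.stream()
            .filter(Node::clusterManagerEligible)
            .filter(n -> n.name().equals(description))
            .collect(Collectors.toList());
        if (matched.isEmpty()) {
            throw new IllegalArgumentException(
                "add voting config exclusions request for [" + description + "] matched no cluster-manager-eligible nodes"
            );
        }
        return matched;
    }

    public static void main(String[] args) {
        List<Node> nodes = List.of(new Node("data-only", false), new Node("cm-node", true));
        System.out.println(resolve(nodes, "cm-node")); // matches the eligible node
        resolve(nodes, "not-a-node");                  // throws IllegalArgumentException
    }
}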
@@ -705,7 +705,7 @@ public class ClusterBootstrapServiceTests extends OpenSearchTestCase {
  IllegalArgumentException.class,
  () -> new ClusterBootstrapService(settings.build(), transportService, () -> emptyList(), () -> false, vc -> fail())
  ).getMessage(),
- containsString("node with [discovery.type] set to [single-node] must be master-eligible")
+ containsString("node with [discovery.type] set to [single-node] must be cluster-manager-eligible")
  );
  }
  }
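The message checked here reflects a startup-time validation: a node running with single-node discovery is the only possible cluster-manager, so it must carry that role. A small stand-alone sketch of such a check (illustrative role and setting values, not the OpenSearch implementation):

import java.util.Set;

class SingleNodeDiscoveryCheckSketch {
    // Reject a single-node-discovery node that is not cluster-manager-eligible up front.
    static void validate(String discoveryType, Set<String> roles) {
        if ("single-node".equals(discoveryType) && roles.contains("cluster_manager") == false) {
            throw new IllegalArgumentException(
                "node with [discovery.type] set to [single-node] must be cluster-manager-eligible"
            );
        }
    }

    public static void main(String[] args) {
        validate("single-node", Set.of("cluster_manager", "data")); // accepted
        validate("single-node", Set.of("data"));                    // throws IllegalArgumentException
    }
}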
@@ -191,7 +191,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
+ "cluster-manager not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
  + "and [] from last-known cluster state; node term 15, last-accepted version 12 in term 4"
  )
  );
@@ -208,7 +208,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet: have discovered []; discovery will continue using ["
+ "cluster-manager not discovered yet: have discovered []; discovery will continue using ["
  + otherAddress
  + "] from hosts providers and [] from last-known cluster state; node term 16, last-accepted version 12 in term 4"
  )
@@ -226,7 +226,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet: have discovered ["
+ "cluster-manager not discovered yet: have discovered ["
  + otherNode
  + "]; discovery will continue using [] from hosts providers "
  + "and [] from last-known cluster state; node term 17, last-accepted version 12 in term 4"
@@ -257,7 +257,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
+ "cluster-manager not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
  + "and [] from last-known cluster state; node term 15, last-accepted version 42 in term 0"
  )
  );
@@ -328,7 +328,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
  + "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered []; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -348,7 +348,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
  + "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered []; "
  + "discovery will continue using ["
  + otherAddress
@@ -370,7 +370,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
  + "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered ["
  + otherNode
  + "]; "
@@ -391,8 +391,8 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
- + "this node must discover master-eligible nodes [other] to bootstrap a cluster: have discovered []; "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ + "this node must discover cluster-manager-eligible nodes [other] to bootstrap a cluster: have discovered []; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
  + "] from last-known cluster state; node term 4, last-accepted version 7 in term 4"
@@ -442,7 +442,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
  + "have discovered []; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -462,7 +462,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
  + "have discovered []; "
  + "discovery will continue using ["
  + otherAddress
@@ -484,7 +484,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
  + "have discovered ["
  + otherNode
  + "]; "
@@ -506,7 +506,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
  + "have discovered ["
  + yetAnotherNode
  + "]; "
@@ -534,7 +534,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -554,7 +554,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using ["
  + otherAddress
@@ -576,7 +576,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
  + "have discovered ["
  + otherNode
  + "] which is a quorum; "
@@ -598,7 +598,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
  + "have discovered ["
  + yetAnotherNode
  + "] which is not a quorum; "
@@ -619,7 +619,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -638,7 +638,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires at least 2 nodes with ids from [n1, n2, n3], "
+ "cluster-manager not discovered or elected yet, an election requires at least 2 nodes with ids from [n1, n2, n3], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -657,7 +657,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -676,7 +676,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
+ "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -695,7 +695,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4, n5], "
+ "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4, n5], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -714,7 +714,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
+ "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -733,7 +733,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], "
+ "cluster-manager not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -752,7 +752,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [n1], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -771,7 +771,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [n1] and a node with id [n2], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and a node with id [n2], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -790,7 +790,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [n1] and two nodes with ids [n2, n3], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and two nodes with ids [n2, n3], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -809,7 +809,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires a node with id [n1] and "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and "
  + "at least 2 nodes with ids from [n2, n3, n4], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
@@ -859,7 +859,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  // nodes from last-known cluster state could be in either order
  is(
  oneOf(
- "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
@@ -867,7 +867,7 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  + otherMasterNode
  + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0",

- "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + otherMasterNode
@@ -889,8 +889,8 @@ public class ClusterFormationFailureHelperTests extends OpenSearchTestCase {
  new StatusInfo(HEALTHY, "healthy-info")
  ).getDescription(),
  is(
- "master not discovered or elected yet, an election requires one or more nodes that have already participated as "
- + "master-eligible nodes in the cluster but this node was not master-eligible the last time it joined the cluster, "
+ "cluster-manager not discovered or elected yet, an election requires one or more nodes that have already participated as "
+ + "cluster-manager-eligible nodes in the cluster but this node was not cluster-manager-eligible the last time it joined the cluster, "
  + "have discovered [] which is not a quorum; "
  + "discovery will continue using [] from hosts providers and ["
  + localNode
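All of these assertions exercise the same description string, which is assembled from the discovered nodes, the hosts-provider addresses, the cluster-manager nodes from the last-known cluster state, and the node's term and last-accepted version. The sketch below shows one way such a message could be composed; the wording and structure are simplified and not taken verbatim from ClusterFormationFailureHelper.

import java.util.List;

class FormationFailureDescriptionSketch {
    // Illustrative composition from the same ingredients the assertions above check.
    static String describe(List<String> discoveredNodes,
                           List<String> providerAddresses,
                           List<String> lastKnownClusterManagers,
                           long nodeTerm,
                           long lastAcceptedVersion,
                           long lastAcceptedTerm) {
        return "cluster-manager not discovered yet: have discovered " + discoveredNodes
            + "; discovery will continue using " + providerAddresses
            + " from hosts providers and " + lastKnownClusterManagers
            + " from last-known cluster state; node term " + nodeTerm
            + ", last-accepted version " + lastAcceptedVersion + " in term " + lastAcceptedTerm;
    }

    public static void main(String[] args) {
        // Empty lists render as "[]", matching the shape of the messages asserted above.
        System.out.println(describe(List.of(), List.of(), List.of(), 15, 12, 4));
    }
}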
@@ -109,7 +109,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
  /**
  * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully
  * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows:
- * 3 master-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back
+ * 3 cluster-manager-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back
  * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it.
  * Old leader is initiating an election at the same time, and wins election. It becomes leader again, but as it previously
  * successfully completed state recovery, is never reset to a state where state recovery can be retried.
@@ -1558,7 +1558,9 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
  final String message = event.getMessage().getFormattedMessage();
  assertThat(
  message,
- startsWith("master not discovered or elected yet, an election requires at least 2 nodes with ids from [")
+ startsWith(
+ "cluster-manager not discovered or elected yet, an election requires at least 2 nodes with ids from ["
+ )
  );

  final List<ClusterNode> matchingNodes = cluster.clusterNodes.stream()
@@ -1729,7 +1731,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {

  if (cluster.clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()).count() == 2) {
  // in the 2-node case, auto-shrinking the voting configuration is required to reduce the voting configuration down to just
- // the leader, otherwise restarting the other master-eligible node triggers an election
+ // the leader, otherwise restarting the other cluster-manager-eligible node triggers an election
  leader.submitSetAutoShrinkVotingConfiguration(true);
  cluster.stabilise(2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY); // 1st delay for the setting update, 2nd for the reconfiguration
  }
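The comment changed in this hunk rests on simple quorum arithmetic: with two cluster-manager-eligible nodes in the voting configuration a majority needs both of them, so restarting either one costs the quorum, while a configuration shrunk to the leader alone stays quorate. A small stand-alone illustration (not OpenSearch code):

class VotingConfigShrinkSketch {
    // A strict majority of the voting configuration is required to keep operating.
    static boolean quorate(int votingConfigSize, int reachableVoters) {
        return reachableVoters * 2 > votingConfigSize;
    }

    public static void main(String[] args) {
        // Two-node voting configuration: losing either node loses the quorum, which is why
        // restarting the other cluster-manager-eligible node would otherwise trigger an election.
        System.out.println(quorate(2, 1)); // false
        // After auto-shrinking the configuration to just the leader, the leader alone is a quorum.
        System.out.println(quorate(1, 1)); // true
    }
}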
@@ -167,7 +167,7 @@ public abstract class AbstractDisruptionTestCase extends OpenSearchIntegTestCase
  assertBusy(() -> {
  ClusterState state = getNodeClusterState(node);
  final DiscoveryNodes nodes = state.nodes();
- assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as master", nodes.getMasterNode());
+ assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as cluster-manager", nodes.getMasterNode());
  if (expectedBlocks != null) {
  for (ClusterBlockLevel level : expectedBlocks.levels()) {
  assertTrue(
@@ -321,7 +321,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
  );

  logger.info(
- "--> creating cluster of {} nodes (master-eligible nodes: {}) with initial configuration {}",
+ "--> creating cluster of {} nodes (cluster-manager-eligible nodes: {}) with initial configuration {}",
  initialNodeCount,
  masterEligibleNodeIds,
  initialConfiguration
@@ -1121,7 +1121,7 @@ public final class InternalTestCluster extends TestCluster {
  }

  assertTrue(
- "expected at least one master-eligible node left in " + nodes,
+ "expected at least one cluster-manager-eligible node left in " + nodes,
  nodes.isEmpty() || nodes.values().stream().anyMatch(NodeAndClient::isMasterEligible)
  );

@@ -1848,7 +1848,8 @@ public final class InternalTestCluster extends TestCluster {
  publishNode(nodeAndClient);

  if (callback.validateClusterForming() || excludedNodeIds.isEmpty() == false) {
- // we have to validate cluster size to ensure that the restarted node has rejoined the cluster if it was master-eligible;
+ // we have to validate cluster size to ensure that the restarted node has rejoined the cluster if it was
+ // cluster-manager-eligible;
  validateClusterFormed();
  }
  }
@@ -1999,7 +2000,7 @@ public final class InternalTestCluster extends TestCluster {

  /**
  * Performs cluster bootstrap when node with index {@link #bootstrapMasterNodeIndex} is started
- * with the names of all existing and new master-eligible nodes.
+ * with the names of all existing and new cluster-manager-eligible nodes.
  * Indexing starts from 0.
  * If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing.
  */