* Fix ShrinkIndexIT * Move this test suite to cluster scope. Currently, `testShrinkThenSplitWithFailedNode` stops a random node which may turn out to be the only shared master node, so the cluster reset fails because no shared master node survived. * Closes #44164
This commit is contained in:
parent
c82d9c5b50
commit
5f22370b6b
|
@ -75,7 +75,6 @@ import static org.hamcrest.Matchers.containsString;
|
|||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
|
||||
@ESIntegTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/44164")
|
||||
public class ShrinkIndexIT extends ESIntegTestCase {
|
||||
|
||||
@Override
|
||||
|
@ -559,7 +558,8 @@ public class ShrinkIndexIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
public void testShrinkThenSplitWithFailedNode() throws Exception {
|
||||
internalCluster().ensureAtLeastNumDataNodes(3);
|
||||
internalCluster().ensureAtLeastNumDataNodes(2);
|
||||
final String shrinkNode = internalCluster().startDataOnlyNode();
|
||||
|
||||
final int shardCount = between(2, 5);
|
||||
prepareCreate("original").setSettings(Settings.builder().put(indexSettings())
|
||||
|
@ -567,8 +567,6 @@ public class ShrinkIndexIT extends ESIntegTestCase {
|
|||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardCount)).get();
|
||||
client().admin().indices().prepareFlush("original").get();
|
||||
ensureGreen();
|
||||
final String shrinkNode
|
||||
= client().admin().cluster().prepareNodesInfo("data:true").clear().get().getNodes().get(0).getNode().getName();
|
||||
client().admin().indices().prepareUpdateSettings("original")
|
||||
.setSettings(Settings.builder()
|
||||
.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), shrinkNode)
|
||||
|
|
|
@ -1632,6 +1632,13 @@ public final class InternalTestCluster extends TestCluster {
|
|||
ensureOpen();
|
||||
NodeAndClient nodeAndClient = getRandomNodeAndClient(nc -> filter.test(nc.node.settings()));
|
||||
if (nodeAndClient != null) {
|
||||
if (nodeAndClient.nodeAndClientId() < sharedNodesSeeds.length && nodeAndClient.isMasterEligible() && autoManageMasterNodes
|
||||
&& nodes.values().stream()
|
||||
.filter(NodeAndClient::isMasterEligible)
|
||||
.filter(n -> n.nodeAndClientId() < sharedNodesSeeds.length)
|
||||
.count() == 1) {
|
||||
throw new AssertionError("Tried to stop the only master eligible shared node");
|
||||
}
|
||||
logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
|
||||
stopNodesAndClient(nodeAndClient);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue