Fix ShrinkIndexIT (#44214) (#44223)

* Fix ShrinkIndexIT

* Move this test suite to cluster scope. Currently, `testShrinkThenSplitWithFailedNode` stops a random node that can turn out to be the only shared master-eligible node, so the per-test cluster reset fails because no shared master node survives (see the sketch below).
* Closes #44164
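
For context, the new test setup boils down to the following condensed sketch of the diff below (real `ESIntegTestCase`/`InternalTestCluster` APIs; `original` is the index name used by the test):

```java
// Ensure at least two shared data nodes, then start a dedicated,
// non-master-eligible data-only node as the shrink target.
internalCluster().ensureAtLeastNumDataNodes(2);
final String shrinkNode = internalCluster().startDataOnlyNode();

// Pin all shards of "original" onto the dedicated node before shrinking, so the
// node the test later stops is never the only shared master-eligible node.
client().admin().indices().prepareUpdateSettings("original")
    .setSettings(Settings.builder()
        .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING
            .getConcreteSettingForNamespace("_name").getKey(), shrinkNode))
    .get();
```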
Armin Braun 2019-07-11 17:58:00 +02:00 committed by GitHub
parent c82d9c5b50
commit 5f22370b6b
2 changed files with 9 additions and 4 deletions

ShrinkIndexIT.java

@@ -75,7 +75,6 @@ import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
-@ESIntegTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/44164")
 public class ShrinkIndexIT extends ESIntegTestCase {
 
     @Override
@@ -559,7 +558,8 @@ public class ShrinkIndexIT extends ESIntegTestCase {
     }
 
     public void testShrinkThenSplitWithFailedNode() throws Exception {
-        internalCluster().ensureAtLeastNumDataNodes(3);
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        final String shrinkNode = internalCluster().startDataOnlyNode();
 
         final int shardCount = between(2, 5);
         prepareCreate("original").setSettings(Settings.builder().put(indexSettings())
@@ -567,8 +567,6 @@ public class ShrinkIndexIT extends ESIntegTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardCount)).get();
         client().admin().indices().prepareFlush("original").get();
         ensureGreen();
-        final String shrinkNode
-            = client().admin().cluster().prepareNodesInfo("data:true").clear().get().getNodes().get(0).getNode().getName();
         client().admin().indices().prepareUpdateSettings("original")
             .setSettings(Settings.builder()
                 .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), shrinkNode)
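
For reference, `INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()` resolves to the allocation filter key `index.routing.allocation.require._name`, so the update above is equivalent to this literal-key sketch:

```java
// Equivalent literal-key form: require every shard of "original" on the shrink node.
client().admin().indices().prepareUpdateSettings("original")
    .setSettings(Settings.builder()
        .put("index.routing.allocation.require._name", shrinkNode))
    .get();
```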

InternalTestCluster.java

@@ -1632,6 +1632,13 @@ public final class InternalTestCluster extends TestCluster {
         ensureOpen();
         NodeAndClient nodeAndClient = getRandomNodeAndClient(nc -> filter.test(nc.node.settings()));
         if (nodeAndClient != null) {
+            if (nodeAndClient.nodeAndClientId() < sharedNodesSeeds.length && nodeAndClient.isMasterEligible() && autoManageMasterNodes
+                && nodes.values().stream()
+                       .filter(NodeAndClient::isMasterEligible)
+                       .filter(n -> n.nodeAndClientId() < sharedNodesSeeds.length)
+                       .count() == 1) {
+                throw new AssertionError("Tried to stop the only master eligible shared node");
+            }
             logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
             stopNodesAndClient(nodeAndClient);
         }
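
With this guard, stopping a filtered random node fails fast with an `AssertionError` instead of leaving the shared cluster with no master-eligible node and breaking the next cluster reset. The fixed test never trips it, because it stops the dedicated data-only node by name, roughly along these lines (a sketch; `nameFilter` is the existing `InternalTestCluster` helper for matching nodes by node name):

```java
// Sketch: stop only the dedicated shrink node; shared master-eligible nodes survive.
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(shrinkNode));
```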