[7.x] Adjust randomization in cluster shard limit tests (#47254)

This commit adjusts the randomization used by the cluster shard limit tests so
that a larger gap is often left between the shard limit and the size of the
first index. This allows the same randomization to be used for all of the
tests, and alleviates flakiness in
`testIndexCreationOverLimitFromTemplate`.
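
For context, the new randomization scheme (visible in `ClusterShardLimitIT.ShardCounts.forDataNodeCount` in the diff below) boils down to adding a random amount of headroom to the per-node limit, so the first index no longer lands right at the limit. The following is a minimal, self-contained sketch of that arithmetic; the class name is hypothetical and the `java.util.Random`-backed `between` helper merely stands in for the test framework's utility of the same name.

```java
import java.util.Random;

// Standalone sketch of the randomization now used by ShardCounts.forDataNodeCount.
public class ShardLimitHeadroomSketch {
    private static final Random RANDOM = new Random();

    // Inclusive random int, standing in for the test framework's between(min, max).
    private static int between(int min, int max) {
        return min + RANDOM.nextInt(max - min + 1);
    }

    public static void main(String[] args) {
        int dataNodes = between(2, 90);                    // cluster sizes are now capped at 90
        int mainIndexShards = between(1, 10);              // primaries of the first index
        int mainIndexReplicas = between(0, dataNodes - 1); // replicas of the first index
        int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards;

        // Previously the per-node limit was just large enough to fit the first index:
        int tightLimit = (int) Math.ceil((double) totalShardsInIndex / dataNodes);

        // Now a random amount of headroom is sometimes added, so the first index
        // often sits well below the cluster-wide cap instead of right at it:
        int shardsPerNode = tightLimit + between(0, 10);
        int totalCap = shardsPerNode * dataNodes;

        System.out.printf("first index: %d shards, cluster-wide cap: %d, headroom: %d%n",
            totalShardsInIndex, totalCap, totalCap - totalShardsInIndex);
    }
}
```

With this headroom, `testIndexCreationOverLimitFromTemplate` can use the shared `forDataNodeCount` counts directly instead of hand-adjusting them, which is the block removed from `ClusterShardLimitIT` below.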
Gordon Brown authored 2019-10-01 14:53:10 -06:00 (committed by GitHub)
parent 99b25d3740
commit ba6ee2d40d
4 changed files with 11 additions and 20 deletions

MetaDataCreateIndexServiceTests.java

@@ -474,7 +474,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
     }
 
     public void testShardLimit() {
-        int nodesInCluster = randomIntBetween(2,100);
+        int nodesInCluster = randomIntBetween(2,90);
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster);
         Settings clusterSettings = Settings.builder()
             .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode())

MetaDataIndexStateServiceTests.java

@@ -334,7 +334,7 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
     }
 
     public void testValidateShardLimit() {
-        int nodesInCluster = randomIntBetween(2,100);
+        int nodesInCluster = randomIntBetween(2,90);
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster);
         Settings clusterSettings = Settings.builder()
             .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode())

ClusterShardLimitIT.java

@@ -45,6 +45,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
 public class ClusterShardLimitIT extends ESIntegTestCase {
@@ -102,24 +103,11 @@ public class ClusterShardLimitIT extends ESIntegTestCase {
         assertFalse(clusterState.getMetaData().hasIndex("should-fail"));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47107")
     public void testIndexCreationOverLimitFromTemplate() {
         int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size();
 
-        final ShardCounts counts;
-        {
-            final ShardCounts temporaryCounts = ShardCounts.forDataNodeCount(dataNodes);
-            /*
-             * We are going to create an index that will bring us up to one below the limit; we go one below the limit to ensure the
-             * template is used instead of one shard.
-             */
-            counts = new ShardCounts(
-                temporaryCounts.shardsPerNode,
-                temporaryCounts.firstIndexShards - 1,
-                temporaryCounts.firstIndexReplicas,
-                temporaryCounts.failingIndexShards + 1,
-                temporaryCounts.failingIndexReplicas);
-        }
+        final ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes);
 
         setShardsPerNode(counts.getShardsPerNode());
 
         if (counts.firstIndexShards > 0) {
@@ -401,10 +389,13 @@ public class ClusterShardLimitIT extends ESIntegTestCase {
         }
 
         public static ShardCounts forDataNodeCount(int dataNodes) {
+            assertThat("this method will not work reliably with this many data nodes due to the limit of shards in a single index," +
+                "use fewer data nodes or multiple indices", dataNodes, lessThanOrEqualTo(90));
             int mainIndexReplicas = between(0, dataNodes - 1);
             int mainIndexShards = between(1, 10);
             int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards;
-            int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes);
+            // Sometimes add some headroom to the limit to check that it works even if you're not already right up against the limit
+            int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes) + between(0, 10);
             int totalCap = shardsPerNode * dataNodes;
 
             int failingIndexShards;

IndicesServiceTests.java

@@ -572,7 +572,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
     }
 
     public void testOverShardLimit() {
-        int nodesInCluster = randomIntBetween(1,100);
+        int nodesInCluster = randomIntBetween(1,90);
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster);
 
         Settings clusterSettings = Settings.builder()
@@ -594,7 +594,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
     }
 
     public void testUnderShardLimit() {
-        int nodesInCluster = randomIntBetween(2,100);
+        int nodesInCluster = randomIntBetween(2,90);
         // Calculate the counts for a cluster 1 node smaller than we have to ensure we have headroom
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster - 1);
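
For reference, a small worked instance of the comment in `testUnderShardLimit` above, using made-up numbers and ignoring the new random headroom (which would only raise the cap further): computing the counts for one node fewer than the cluster actually has leaves at least one node's worth of slack under the cluster-wide limit. The class name and concrete values below are purely illustrative.

```java
// Hypothetical numbers illustrating the "cluster 1 node smaller" headroom trick in testUnderShardLimit.
public class UnderShardLimitHeadroomExample {
    public static void main(String[] args) {
        int nodesInCluster = 5;
        int nodesUsedForCounts = nodesInCluster - 1;  // counts are computed as if only 4 nodes exist

        int mainIndexShards = 6;      // example primary count
        int mainIndexReplicas = 2;    // example replica count
        int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards;                    // 18

        int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / nodesUsedForCounts); // ceil(18 / 4) = 5
        int clusterWideCap = shardsPerNode * nodesInCluster;                                   // 5 * 5 = 25

        // 18 <= 25: the index fits under the limit with at least shardsPerNode (5) shards of slack,
        // so the "under the limit" path is exercised reliably.
        System.out.println(totalShardsInIndex + " shards vs. cluster-wide cap of " + clusterWideCap);
    }
}
```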