diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 4f969d5fb50..e5bcac3f41f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -474,7 +474,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { } public void testShardLimit() { - int nodesInCluster = randomIntBetween(2,100); + int nodesInCluster = randomIntBetween(2,90); ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster); Settings clusterSettings = Settings.builder() .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode()) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java index 7b3293bde79..a7ad9d79e74 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java @@ -334,7 +334,7 @@ public class MetaDataIndexStateServiceTests extends ESTestCase { } public void testValidateShardLimit() { - int nodesInCluster = randomIntBetween(2,100); + int nodesInCluster = randomIntBetween(2,90); ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster); Settings clusterSettings = Settings.builder() .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode()) diff --git a/server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 5e9320deafa..036b381416b 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -45,6 +45,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class ClusterShardLimitIT extends ESIntegTestCase { @@ -102,24 +103,11 @@ public class ClusterShardLimitIT extends ESIntegTestCase { assertFalse(clusterState.getMetaData().hasIndex("should-fail")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47107") public void testIndexCreationOverLimitFromTemplate() { int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); - final ShardCounts counts; - { - final ShardCounts temporaryCounts = ShardCounts.forDataNodeCount(dataNodes); - /* - * We are going to create an index that will bring us up to one below the limit; we go one below the limit to ensure the - * template is used instead of one shard. 
- */ - counts = new ShardCounts( - temporaryCounts.shardsPerNode, - temporaryCounts.firstIndexShards - 1, - temporaryCounts.firstIndexReplicas, - temporaryCounts.failingIndexShards + 1, - temporaryCounts.failingIndexReplicas); - } + final ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); + setShardsPerNode(counts.getShardsPerNode()); if (counts.firstIndexShards > 0) { @@ -401,10 +389,13 @@ public class ClusterShardLimitIT extends ESIntegTestCase { } public static ShardCounts forDataNodeCount(int dataNodes) { + assertThat("this method will not work reliably with this many data nodes due to the limit of shards in a single index, " + + "use fewer data nodes or multiple indices", dataNodes, lessThanOrEqualTo(90)); int mainIndexReplicas = between(0, dataNodes - 1); int mainIndexShards = between(1, 10); int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards; - int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes); + // Sometimes add some headroom to the limit to check that it works even if you're not already right up against the limit + int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes) + between(0, 10); int totalCap = shardsPerNode * dataNodes; int failingIndexShards; diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 192377aebb2..40b7b327630 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -572,7 +572,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { } public void testOverShardLimit() { - int nodesInCluster = randomIntBetween(1,100); + int nodesInCluster = randomIntBetween(1,90); ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster); Settings clusterSettings = Settings.builder() @@ -594,7 +594,7 @@ public class IndicesServiceTests 
extends ESSingleNodeTestCase { } public void testUnderShardLimit() { - int nodesInCluster = randomIntBetween(2,100); + int nodesInCluster = randomIntBetween(2,90); // Calculate the counts for a cluster 1 node smaller than we have to ensure we have headroom ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster - 1);