Adjust randomization in cluster shard limit tests (#47254)

This commit adjusts randomization for the cluster shard limit tests so
that there is often more of a gap left between the limit and the size of
the first index. This allows the same randomization to be used for all
tests, and alleviates flakiness in
`testIndexCreationOverLimitFromTemplate`.
Gordon Brown, 6 years ago
commit e4fc9425e8
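
As a quick orientation before the diff, here is a minimal, self-contained sketch of what the adjusted randomization computes. It mirrors the names used in `ClusterShardLimitIT.ShardCounts.forDataNodeCount` below, with a local `between` helper standing in for the ESTestCase one; it is an illustration, not the actual test code.

```java
import java.util.concurrent.ThreadLocalRandom;

// Sketch only: mirrors the adjusted ShardCounts randomization, not the actual test code.
public class ShardLimitRandomizationSketch {

    // Stand-in for the ESTestCase between(min, max) helper: uniform int in [min, max].
    static int between(int min, int max) {
        return ThreadLocalRandom.current().nextInt(min, max + 1);
    }

    public static void main(String[] args) {
        int dataNodes = between(2, 90);                  // capped at 90 to match the new assertion
        int mainIndexReplicas = between(0, dataNodes - 1);
        int mainIndexShards = between(1, 10);
        int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards;

        // New in this commit: 0-10 shards per node of headroom, so the first index
        // usually lands below the cluster-wide cap instead of exactly on it.
        int headroom = between(0, 10);
        int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes) + headroom;
        int totalCap = shardsPerNode * dataNodes;

        System.out.printf("first index: %d shards, cluster cap: %d, gap: %d%n",
            totalShardsInIndex, totalCap, totalCap - totalShardsInIndex);
    }
}
```

The headroom is what leaves the gap described in the commit message: the first index no longer sits right at the limit, so the same `ShardCounts` randomization can be shared by all of the shard limit tests, including `testIndexCreationOverLimitFromTemplate`.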

server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java (+1, -1)

@@ -454,7 +454,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
     }
 
     public void testShardLimit() {
-        int nodesInCluster = randomIntBetween(2,100);
+        int nodesInCluster = randomIntBetween(2,90);
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster);
         Settings clusterSettings = Settings.builder()
             .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode())

server/src/test/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java (+7, -16)

@@ -45,6 +45,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
 public class ClusterShardLimitIT extends ESIntegTestCase {
@@ -102,24 +103,11 @@ public class ClusterShardLimitIT extends ESIntegTestCase {
         assertFalse(clusterState.getMetaData().hasIndex("should-fail"));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47107")
     public void testIndexCreationOverLimitFromTemplate() {
         int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size();
 
-        final ShardCounts counts;
-        {
-            final ShardCounts temporaryCounts = ShardCounts.forDataNodeCount(dataNodes);
-            /*
-             * We are going to create an index that will bring us up to one below the limit; we go one below the limit to ensure the
-             * template is used instead of one shard.
-             */
-            counts = new ShardCounts(
-                temporaryCounts.shardsPerNode,
-                temporaryCounts.firstIndexShards - 1,
-                temporaryCounts.firstIndexReplicas,
-                temporaryCounts.failingIndexShards + 1,
-                temporaryCounts.failingIndexReplicas);
-        }
+        final ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes);
+
         setShardsPerNode(counts.getShardsPerNode());
 
         if (counts.firstIndexShards > 0) {
@@ -401,10 +389,13 @@ public class ClusterShardLimitIT extends ESIntegTestCase {
         }
 
         public static ShardCounts forDataNodeCount(int dataNodes) {
+            assertThat("this method will not work reliably with this many data nodes due to the limit of shards in a single index," +
+                "use fewer data nodes or multiple indices", dataNodes, lessThanOrEqualTo(90));
             int mainIndexReplicas = between(0, dataNodes - 1);
             int mainIndexShards = between(1, 10);
             int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards;
-            int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes);
+            // Sometimes add some headroom to the limit to check that it works even if you're not already right up against the limit
+            int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes) + between(0, 10);
             int totalCap = shardsPerNode * dataNodes;
 
             int failingIndexShards;
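
To make the headroom concrete with arbitrary example values (not taken from any particular test run): with 3 data nodes, 4 shards and 2 replicas for the first index, `totalShardsInIndex` is (2 + 1) * 4 = 12; a headroom draw of 3 turns the limit into ceil(12 / 3) + 3 = 7 shards per node, i.e. a `totalCap` of 21, leaving 9 shards of spare capacity. Without the headroom the cap in this example would have been ceil(12 / 3) * 3 = 12, with the first index sitting exactly at the limit.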

server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java (+1, -1)

@@ -572,7 +572,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
     }
 
     public void testOverShardLimit() {
-        int nodesInCluster = randomIntBetween(1,100);
+        int nodesInCluster = randomIntBetween(1,90);
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster);
 
         Settings clusterSettings = Settings.builder()