Ver código fonte

fix ShardLimitValidator flaky test (#94218)

Previously the method `canAddShardsToCluster` had a flag to select whether calculations were done over frozen or normal nodes. After that flag-handling code was removed, the test still picked a random group, causing a mismatch with the group the production code now uses.
Pablo Alcantar Morales 2 anos atrás
pai
commit
663e34f38f

+ 6 - 10
server/src/test/java/org/elasticsearch/indices/ShardLimitValidatorTests.java

@@ -33,6 +33,7 @@ import java.util.Set;
 import static org.elasticsearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex;
 import static org.elasticsearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex;
 import static org.elasticsearch.cluster.shards.ShardCounts.forDataNodeCount;
+import static org.elasticsearch.indices.ShardLimitValidator.NORMAL_GROUP;
 import static org.elasticsearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -42,14 +43,12 @@ public class ShardLimitValidatorTests extends ESTestCase {
     public void testOverShardLimit() {
         int nodesInCluster = randomIntBetween(1, 90);
         ShardCounts counts = forDataNodeCount(nodesInCluster);
-
-        String group = randomFrom(ShardLimitValidator.VALID_GROUPS);
         ClusterState state = createClusterForShardLimitTest(
             nodesInCluster,
             counts.getFirstIndexShards(),
             counts.getFirstIndexReplicas(),
             counts.getShardsPerNode(),
-            group
+            NORMAL_GROUP
         );
 
         int shardsToAdd = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas());
@@ -58,7 +57,7 @@ public class ShardLimitValidatorTests extends ESTestCase {
             state,
             counts.getShardsPerNode(),
             nodesInCluster,
-            group
+            NORMAL_GROUP
         );
 
         int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas());
@@ -73,26 +72,23 @@ public class ShardLimitValidatorTests extends ESTestCase {
                 + "]/["
                 + maxShards
                 + "] maximum "
-                + group
+                + NORMAL_GROUP
                 + " shards open",
             errorMessage.get()
         );
         assertFalse(ShardLimitValidator.canAddShardsToCluster(counts.getFailingIndexShards(), counts.getFailingIndexReplicas(), state));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/94215")
     public void testUnderShardLimit() {
         int nodesInCluster = randomIntBetween(2, 90);
         // Calculate the counts for a cluster 1 node smaller than we have to ensure we have headroom
         ShardCounts counts = forDataNodeCount(nodesInCluster - 1);
-
-        String group = randomFrom(ShardLimitValidator.VALID_GROUPS);
         ClusterState state = createClusterForShardLimitTest(
             nodesInCluster,
             counts.getFirstIndexShards(),
             counts.getFirstIndexReplicas(),
             counts.getShardsPerNode(),
-            group
+            NORMAL_GROUP
         );
 
         int existingShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas());
@@ -102,7 +98,7 @@ public class ShardLimitValidatorTests extends ESTestCase {
             state,
             counts.getShardsPerNode(),
             nodesInCluster,
-            group
+            NORMAL_GROUP
         );
 
         assertFalse(errorMessage.isPresent());