Prechádzať zdrojové kódy

Skip range optimization if it'd be slower (#65097)

Don't run the `date_histogram` and `range` aggregations as a `filters`
aggregation if the cost of the filters is high. This should prevent
the optimization from de-optimizing when it bumps into runtime fields
which don't have index structures to speed their queries. For runtime
fields we're better off running `date_histogram` and `range` using the
native `range` aggregator. We detect this situation using `cost` on
the `BulkScorer` from the queries to keep the change general. So it'll
detect other sorts of queries that might be a poor choice for
optimization.
Nik Everett 4 rokov pred
rodič
commit
91cbb9d330

+ 6 - 1
server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java

@@ -50,6 +50,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.notNullValue;
 
 
@@ -626,7 +627,11 @@ public class AggregationProfilerIT extends ESIntegTestCase {
             );
             );
             assertThat(((Number) delegate.get("ranges")).longValue(), equalTo(1L));
             assertThat(((Number) delegate.get("ranges")).longValue(), equalTo(1L));
             assertThat(delegate.get("delegate"), equalTo("FiltersAggregator.FilterByFilter"));
             assertThat(delegate.get("delegate"), equalTo("FiltersAggregator.FilterByFilter"));
-            assertThat(delegate.get("delegate_debug"), equalTo(Map.of("segments_with_deleted_docs", 0)));
+            Map<?, ?> delegateDebug = (Map<?, ?>) delegate.get("delegate_debug"); 
+            assertThat(delegateDebug, hasEntry("segments_with_deleted_docs", 0));
+            assertThat(delegateDebug, hasEntry("max_cost", (long) RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2));
+            assertThat(delegateDebug, hasEntry("estimated_cost", (long) RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2));
+            assertThat((long) delegateDebug.get("estimate_cost_time"), greaterThanOrEqualTo(0L));  // ~1,276,734 nanos is normal
         }
         }
     }
     }
 }
 }

+ 86 - 9
server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java

@@ -188,7 +188,7 @@ public abstract class FiltersAggregator extends BucketsAggregator {
      * and we don't collect "other" buckets. Collecting {@link FilterByFilter}
      * and we don't collect "other" buckets. Collecting {@link FilterByFilter}
      * is generally going to be much faster than the {@link Compatible} aggregator.
      * is generally going to be much faster than the {@link Compatible} aggregator.
      */
      */
-    public static FiltersAggregator buildFilterOrderOrNull(
+    public static FilterByFilter buildFilterOrderOrNull(
         String name,
         String name,
         AggregatorFactories factories,
         AggregatorFactories factories,
         String[] keys,
         String[] keys,
@@ -269,12 +269,25 @@ public abstract class FiltersAggregator extends BucketsAggregator {
      * {@link Compatible} but doesn't support when there is a parent aggregator
      * {@link Compatible} but doesn't support when there is a parent aggregator
      * or any child aggregators.
      * or any child aggregators.
      */
      */
-    private static class FilterByFilter extends FiltersAggregator {
+    public static class FilterByFilter extends FiltersAggregator {
         private final Query[] filters;
         private final Query[] filters;
-        private Weight[] filterWeights;
+        private final boolean profiling;
+        private long estimatedCost = -1;
+        /**
+         * The maximum allowed estimated cost. Defaults to {@code -1} meaning no
+         * max but can be set. Used for emitting debug info.
+         */
+        private long maxCost = -1;
+        private long estimateCostTime;
+        private Weight[] weights;
+        /**
+         * If {@link #estimateCost} was called then this'll contain a
+         * scorer per leaf per filter. If it wasn't then this'll be {@code null}. 
+         */
+        private BulkScorer[][] scorers;
         private int segmentsWithDeletedDocs;
         private int segmentsWithDeletedDocs;
 
 
-        FilterByFilter(
+        private FilterByFilter(
             String name,
             String name,
             String[] keys,
             String[] keys,
             Query[] filters,
             Query[] filters,
@@ -286,6 +299,57 @@ public abstract class FiltersAggregator extends BucketsAggregator {
         ) throws IOException {
         ) throws IOException {
             super(name, AggregatorFactories.EMPTY, keys, keyed, null, context, parent, cardinality, metadata);
             super(name, AggregatorFactories.EMPTY, keys, keyed, null, context, parent, cardinality, metadata);
             this.filters = filters;
             this.filters = filters;
+            this.profiling = context.getProfilers() != null;
+        }
+
+        /**
+         * Estimate the number of documents that this aggregation must visit. We'll
+         * stop counting once we've passed {@code maxCost} if we aren't profiling.
+         */
+        public long estimateCost(long maxCost) throws IOException {
+            this.maxCost = maxCost;
+            if (estimatedCost != -1) {
+                return estimatedCost;
+            }
+            long limit = profiling ? Long.MAX_VALUE : maxCost;
+            long start = profiling ? System.nanoTime() : 0;
+            estimatedCost = 0;
+            weights = buildWeights(topLevelQuery(), filters);
+            List<LeafReaderContext> leaves = searcher().getIndexReader().leaves();
+            /*
+             * Its important that we save a copy of the BulkScorer because for
+             * queries like PointInRangeQuery building the scorer can be a big
+             * chunk of the run time.
+             */
+            scorers = new BulkScorer[leaves.size()][];
+            for (LeafReaderContext ctx : leaves) {
+                scorers[ctx.ord] = new BulkScorer[filters.length];
+                for (int f = 0; f < filters.length; f++) {
+                    scorers[ctx.ord][f] = weights[f].bulkScorer(ctx);
+                    if (scorers[ctx.ord][f] == null) {
+                        // Doesn't find anything in this leaf
+                        continue;
+                    }
+                    if (estimatedCost >= 0 && estimatedCost <= limit) {
+                        // Only keep accumulating while we haven't overflowed and haven't passed the limit
+                        estimatedCost += scorers[ctx.ord][f].cost();
+                    }
+                }
+            }
+            if (profiling) {
+                estimateCostTime = System.nanoTime() - start;
+            }
+            // If we've overflowed use Long.MAX_VALUE
+            return estimatedCost < 0 ? Long.MAX_VALUE : estimatedCost;
+        }
+
+        /**
+         * Are the scorers cached?
+         * <p>
+         * Package private for testing.
+         */
+        boolean scorersCached() {
+            return scorers != null;
         }
         }
 
 
         /**
         /**
@@ -297,12 +361,19 @@ public abstract class FiltersAggregator extends BucketsAggregator {
          */
          */
         @Override
         @Override
         protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
         protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
-            if (filterWeights == null) {
-                filterWeights = buildWeights(topLevelQuery(), filters);
+            if (weights == null) {
+                weights = buildWeights(topLevelQuery(), filters);
             }
             }
             Bits live = ctx.reader().getLiveDocs();
             Bits live = ctx.reader().getLiveDocs();
             for (int filterOrd = 0; filterOrd < filters.length; filterOrd++) {
             for (int filterOrd = 0; filterOrd < filters.length; filterOrd++) {
-                BulkScorer scorer = filterWeights[filterOrd].bulkScorer(ctx);
+                BulkScorer scorer;
+                if (scorers == null) {
+                    // No cached scorers
+                    scorer = weights[filterOrd].bulkScorer(ctx);
+                } else {
+                    // Scorers cached when calling estimateCost
+                    scorer = scorers[ctx.ord][filterOrd];
+                }
                 if (scorer == null) {
                 if (scorer == null) {
                     // the filter doesn't match any docs
                     // the filter doesn't match any docs
                     continue;
                     continue;
@@ -319,6 +390,12 @@ public abstract class FiltersAggregator extends BucketsAggregator {
         public void collectDebugInfo(BiConsumer<String, Object> add) {
         public void collectDebugInfo(BiConsumer<String, Object> add) {
             super.collectDebugInfo(add);
             super.collectDebugInfo(add);
             add.accept("segments_with_deleted_docs", segmentsWithDeletedDocs);
             add.accept("segments_with_deleted_docs", segmentsWithDeletedDocs);
+            if (estimatedCost != -1) {
+                // -1 means we didn't estimate it.
+                add.accept("estimated_cost", estimatedCost);
+                add.accept("max_cost", maxCost);
+                add.accept("estimate_cost_time", estimateCostTime);
+            }
         }
         }
     }
     }
 
 
@@ -402,7 +479,7 @@ public abstract class FiltersAggregator extends BucketsAggregator {
      * top level query on a date and a filter on a date. This kind of thing
      * top level query on a date and a filter on a date. This kind of thing
      * is very common when visualizing logs and metrics.
      * is very common when visualizing logs and metrics.
      */
      */
-    private Query filterMatchingBoth(Query lhs, Query rhs) {
+    static Query filterMatchingBoth(Query lhs, Query rhs) {
         if (lhs instanceof MatchAllDocsQuery) {
         if (lhs instanceof MatchAllDocsQuery) {
             return rhs;
             return rhs;
         }
         }
@@ -424,7 +501,7 @@ public abstract class FiltersAggregator extends BucketsAggregator {
         return builder.build();
         return builder.build();
     }
     }
 
 
-    private Query unwrap(Query query) {
+    private static Query unwrap(Query query) {
         if (query instanceof IndexSortSortedNumericDocValuesRangeQuery) {
         if (query instanceof IndexSortSortedNumericDocValuesRangeQuery) {
             query = ((IndexSortSortedNumericDocValuesRangeQuery) query).getFallbackQuery();
             query = ((IndexSortSortedNumericDocValuesRangeQuery) query).getFallbackQuery();
         }
         }

+ 1 - 0
server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java

@@ -75,6 +75,7 @@ public class GeoDistanceRangeAggregatorFactory extends ValuesSourceAggregatorFac
                     rangeFactory,
                     rangeFactory,
                     ranges,
                     ranges,
                     averageDocsPerRange,
                     averageDocsPerRange,
+                    null, // null here because we didn't try filters at all
                     keyed,
                     keyed,
                     context,
                     context,
                     parent,
                     parent,

+ 55 - 8
server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java

@@ -58,6 +58,7 @@ import org.elasticsearch.search.internal.SearchContext;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Objects;
@@ -288,7 +289,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
          * which is good enough because that is the most embarrassing scenario.
          * which is good enough because that is the most embarrassing scenario.
          */
          */
         double averageDocsPerRange = ((double) context.searcher().getIndexReader().maxDoc()) / ranges.length;
         double averageDocsPerRange = ((double) context.searcher().getIndexReader().maxDoc()) / ranges.length;
-        Aggregator adapted = adaptIntoFiltersOrNull(
+        FromFilters<?> adapted = adaptIntoFiltersOrNull(
             name,
             name,
             factories,
             factories,
             valuesSourceConfig,
             valuesSourceConfig,
@@ -301,8 +302,23 @@ public abstract class RangeAggregator extends BucketsAggregator {
             cardinality,
             cardinality,
             metadata
             metadata
         );
         );
+        Map<String, Object> filtersDebug = null;
         if (adapted != null) {
         if (adapted != null) {
-            return adapted;
+            long maxEstimatedFiltersCost = context.searcher().getIndexReader().maxDoc();
+            long estimatedFiltersCost = adapted.estimateCost(maxEstimatedFiltersCost);
+            if (estimatedFiltersCost <= maxEstimatedFiltersCost) {
+                return adapted;
+            }
+            /*
+             * Looks like it'd be more expensive to use the filter-by-filter
+             * aggregator. Oh well. Snapshot the filter-by-filter
+             * aggregator's debug information if we're profiling because it
+             * is useful even if the aggregator isn't. 
+             */
+            if (context.getProfilers() != null) {
+                filtersDebug = new HashMap<>();
+                adapted.delegate().collectDebugInfo(filtersDebug::put);
+            }
         }
         }
         return buildWithoutAttemptedToAdaptToFilters(
         return buildWithoutAttemptedToAdaptToFilters(
             name,
             name,
@@ -312,6 +328,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
             rangeFactory,
             rangeFactory,
             ranges,
             ranges,
             averageDocsPerRange,
             averageDocsPerRange,
+            filtersDebug,
             keyed,
             keyed,
             context,
             context,
             parent,
             parent,
@@ -320,7 +337,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
         );
         );
     }
     }
 
 
-    public static Aggregator adaptIntoFiltersOrNull(
+    public static FromFilters<?> adaptIntoFiltersOrNull(
         String name,
         String name,
         AggregatorFactories factories,
         AggregatorFactories factories,
         ValuesSourceConfig valuesSourceConfig,
         ValuesSourceConfig valuesSourceConfig,
@@ -383,7 +400,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
                     context.getQueryShardContext()
                     context.getQueryShardContext()
                 );
                 );
         }
         }
-        FiltersAggregator delegate = FiltersAggregator.buildFilterOrderOrNull(
+        FiltersAggregator.FilterByFilter delegate = FiltersAggregator.buildFilterOrderOrNull(
             name,
             name,
             factories,
             factories,
             keys,
             keys,
@@ -424,6 +441,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
         InternalRange.Factory<?, ?> rangeFactory,
         InternalRange.Factory<?, ?> rangeFactory,
         Range[] ranges,
         Range[] ranges,
         double averageDocsPerRange,
         double averageDocsPerRange,
+        Map<String, Object> filtersDebug,
         boolean keyed,
         boolean keyed,
         SearchContext context,
         SearchContext context,
         Aggregator parent,
         Aggregator parent,
@@ -439,6 +457,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
                 rangeFactory,
                 rangeFactory,
                 ranges,
                 ranges,
                 averageDocsPerRange,
                 averageDocsPerRange,
+                filtersDebug,
                 keyed,
                 keyed,
                 context,
                 context,
                 parent,
                 parent,
@@ -454,6 +473,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
             rangeFactory,
             rangeFactory,
             ranges,
             ranges,
             averageDocsPerRange,
             averageDocsPerRange,
+            filtersDebug,
             keyed,
             keyed,
             context,
             context,
             parent,
             parent,
@@ -468,11 +488,23 @@ public abstract class RangeAggregator extends BucketsAggregator {
     private final boolean keyed;
     private final boolean keyed;
     private final InternalRange.Factory rangeFactory;
     private final InternalRange.Factory rangeFactory;
     private final double averageDocsPerRange;
     private final double averageDocsPerRange;
+    private final Map<String, Object> filtersDebug;
 
 
-    private RangeAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format,
-            InternalRange.Factory rangeFactory, Range[] ranges, double averageDocsPerRange, boolean keyed, SearchContext context,
-            Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata) throws IOException {
-
+    private RangeAggregator(
+        String name,
+        AggregatorFactories factories,
+        ValuesSource.Numeric valuesSource,
+        DocValueFormat format,
+        InternalRange.Factory rangeFactory,
+        Range[] ranges,
+        double averageDocsPerRange,
+        Map<String, Object> filtersDebug,
+        boolean keyed,
+        SearchContext context,
+        Aggregator parent,
+        CardinalityUpperBound cardinality,
+        Map<String, Object> metadata
+    ) throws IOException {
         super(name, factories, context, parent, cardinality.multiply(ranges.length), metadata);
         super(name, factories, context, parent, cardinality.multiply(ranges.length), metadata);
         assert valuesSource != null;
         assert valuesSource != null;
         this.valuesSource = valuesSource;
         this.valuesSource = valuesSource;
@@ -481,6 +513,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
         this.rangeFactory = rangeFactory;
         this.rangeFactory = rangeFactory;
         this.ranges = ranges;
         this.ranges = ranges;
         this.averageDocsPerRange = averageDocsPerRange;
         this.averageDocsPerRange = averageDocsPerRange;
+        this.filtersDebug = filtersDebug;
     }
     }
 
 
     @Override
     @Override
@@ -540,6 +573,9 @@ public abstract class RangeAggregator extends BucketsAggregator {
         super.collectDebugInfo(add);
         super.collectDebugInfo(add);
         add.accept("ranges", ranges.length);
         add.accept("ranges", ranges.length);
         add.accept("average_docs_per_range", averageDocsPerRange);
         add.accept("average_docs_per_range", averageDocsPerRange);
+        if (filtersDebug != null) {
+            add.accept("filters_debug", filtersDebug);
+        }
     }
     }
 
 
     public static class Unmapped<R extends RangeAggregator.Range> extends NonCollectingAggregator {
     public static class Unmapped<R extends RangeAggregator.Range> extends NonCollectingAggregator {
@@ -591,6 +627,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
             Factory rangeFactory,
             Factory rangeFactory,
             Range[] ranges,
             Range[] ranges,
             double averageDocsPerRange,
             double averageDocsPerRange,
+            Map<String, Object> filtersDebug,
             boolean keyed,
             boolean keyed,
             SearchContext context,
             SearchContext context,
             Aggregator parent,
             Aggregator parent,
@@ -605,6 +642,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
                 rangeFactory,
                 rangeFactory,
                 ranges,
                 ranges,
                 averageDocsPerRange,
                 averageDocsPerRange,
+                filtersDebug,
                 keyed,
                 keyed,
                 context,
                 context,
                 parent,
                 parent,
@@ -641,6 +679,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
             Factory rangeFactory,
             Factory rangeFactory,
             Range[] ranges,
             Range[] ranges,
             double averageDocsPerRange,
             double averageDocsPerRange,
+            Map<String, Object> filtersDebug,
             boolean keyed,
             boolean keyed,
             SearchContext context,
             SearchContext context,
             Aggregator parent,
             Aggregator parent,
@@ -655,6 +694,7 @@ public abstract class RangeAggregator extends BucketsAggregator {
                 rangeFactory,
                 rangeFactory,
                 ranges,
                 ranges,
                 averageDocsPerRange,
                 averageDocsPerRange,
+                filtersDebug,
                 keyed,
                 keyed,
                 context,
                 context,
                 parent,
                 parent,
@@ -747,6 +787,13 @@ public abstract class RangeAggregator extends BucketsAggregator {
             this.averageDocsPerRange = averageDocsPerRange;
             this.averageDocsPerRange = averageDocsPerRange;
         }
         }
 
 
+        /**
+         * Estimate the number of documents that this aggregation must visit.
+         */
+        long estimateCost(long maxEstimatedCost) throws IOException {
+            return ((FiltersAggregator.FilterByFilter) delegate()).estimateCost(maxEstimatedCost);
+        }
+
         @Override
         @Override
         protected InternalAggregation adapt(InternalAggregation delegateResult) {
         protected InternalAggregation adapt(InternalAggregation delegateResult) {
             InternalFilters filters = (InternalFilters) delegateResult;
             InternalFilters filters = (InternalFilters) delegateResult;

+ 61 - 1
server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java

@@ -24,6 +24,7 @@ import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexOrDocValuesQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Query;
@@ -42,12 +43,17 @@ import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper
 import org.junit.Before;
 import org.junit.Before;
 
 
 import java.io.IOException;
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.HashSet;
 import java.util.List;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.Set;
 
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.Mockito.mock;
 
 
 public class FiltersAggregatorTests extends AggregatorTestCase {
 public class FiltersAggregatorTests extends AggregatorTestCase {
     private MappedFieldType fieldType;
     private MappedFieldType fieldType;
@@ -200,7 +206,31 @@ public class FiltersAggregatorTests extends AggregatorTestCase {
         }
         }
     }
     }
 
 
-    public void testMergePointRangeQueries() throws IOException {
+    /**
+     * Test that we perform the appropriate unwrapping to merged queries.
+     */
+    public void testFilterMatchingBoth() throws IOException {
+        Query topLevelQuery = LongPoint.newRangeQuery(
+            "test",
+            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-01"),
+            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-02-01")
+        );
+        Query filterQuery = LongPoint.newRangeQuery(
+            "test",
+            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-01"),
+            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-02-01")
+        );
+        Query matchingBoth = FiltersAggregator.filterMatchingBoth(new IndexOrDocValuesQuery(topLevelQuery, mock(Query.class)), filterQuery);
+        /*
+         * The topLevelQuery is entirely contained within the filter query so
+         * it is good enough to match that. See MergedPointRangeQueryTests for
+         * tons more tests around this. Really in this test we're just excited
+         * to prove that we unwrapped the IndexOrDocValuesQuery above. 
+         */
+        assertThat(matchingBoth, equalTo(topLevelQuery));
+    }
+
+    public void testWithMergedPointRangeQueries() throws IOException {
         MappedFieldType ft = new DateFieldMapper.DateFieldType("test", Resolution.MILLISECONDS);
         MappedFieldType ft = new DateFieldMapper.DateFieldType("test", Resolution.MILLISECONDS);
         AggregationBuilder builder = new FiltersAggregationBuilder(
         AggregationBuilder builder = new FiltersAggregationBuilder(
             "test",
             "test",
@@ -220,4 +250,34 @@ public class FiltersAggregatorTests extends AggregatorTestCase {
             assertThat(filters.getBucketByKey("q1").getDocCount(), equalTo(1L));
             assertThat(filters.getBucketByKey("q1").getDocCount(), equalTo(1L));
         }, ft);
         }, ft);
     }
     }
+
+    public void testFilterByFilterCost() throws IOException {
+        MappedFieldType ft = new DateFieldMapper.DateFieldType("test", Resolution.MILLISECONDS);
+        AggregationBuilder builder = new FiltersAggregationBuilder(
+            "test",
+            new KeyedFilter("q1", new RangeQueryBuilder("test").from("2020-01-01").to("2020-03-01").includeUpper(false))
+        );
+        withAggregator(
+            builder,
+            new MatchAllDocsQuery(),
+            iw -> {
+                iw.addDocument(List.of(new LongPoint("test", DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2010-01-02"))));
+                iw.addDocument(List.of(new LongPoint("test", DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-02"))));
+            },
+            (searcher, agg) -> {
+                assertThat(agg, instanceOf(FiltersAggregator.FilterByFilter.class));
+                FiltersAggregator.FilterByFilter filterByFilter = (FiltersAggregator.FilterByFilter) agg;
+                int maxDoc = searcher.getIndexReader().maxDoc();
+                assertThat(filterByFilter.estimateCost(maxDoc), equalTo(1L));
+                assertThat(filterByFilter.scorersCached(), equalTo(true));
+                Map<String, Object> debug = new HashMap<>();
+                filterByFilter.collectDebugInfo(debug::put);
+                assertThat(debug, hasEntry("segments_with_deleted_docs", 0));
+                assertThat(debug, hasEntry("estimated_cost", 1L));
+                assertThat(debug, hasEntry("max_cost", (long) maxDoc));
+                assertThat(debug, hasEntry("estimate_cost_time", 0L));
+            },
+            ft
+        );
+    }
 }
 }

+ 21 - 0
test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java

@@ -48,6 +48,7 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.CheckedBiConsumer;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.TriFunction;
 import org.elasticsearch.common.TriFunction;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.breaker.CircuitBreaker;
@@ -489,6 +490,26 @@ public abstract class AggregatorTestCase extends ESTestCase {
         }
         }
     }
     }
 
 
+    protected void withAggregator(
+        AggregationBuilder aggregationBuilder,
+        Query query,
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
+        CheckedBiConsumer<IndexSearcher, Aggregator, IOException> verify,
+        MappedFieldType... fieldTypes
+    ) throws IOException {
+        try (Directory directory = newDirectory()) {
+            RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+            buildIndex.accept(indexWriter);
+            indexWriter.close();
+
+            try (DirectoryReader unwrapped = DirectoryReader.open(directory); IndexReader indexReader = wrapDirectoryReader(unwrapped)) {
+                IndexSearcher searcher = newIndexSearcher(indexReader);
+                SearchContext context = createSearchContext(searcher, query, fieldTypes);
+                verify.accept(searcher, createAggregator(aggregationBuilder, context));
+            }
+        }
+    }
+
     /**
     /**
      * Override to wrap the {@linkplain DirectoryReader} for aggs like
      * Override to wrap the {@linkplain DirectoryReader} for aggs like
      * {@link NestedAggregationBuilder}.
      * {@link NestedAggregationBuilder}.