فهرست منبع

Use LogDocMergePolicy in CompositeAggregatorTests (#103174)

* Use LogDocMergePolicy in CompositeAggregatorTests

The latest Lucene upgrade adds a new merge policy that reverses the
order of the documents. To avoid randomization we hardcode the merge
policy to LogDocMergePolicy.

This has also been applied to NestedAggregatorTests, so the logic is
moved to AggregatorTestCase to be available to all agg tests.

Fixes #103105

* make policy optional

* update comment

* suggested updates
Kostas Krikellas 1 سال پیش
والد
کامیت
7174c19abf

+ 11 - 4
server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java

@@ -21,6 +21,7 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LogDocMergePolicy;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.FieldExistsQuery;
 import org.apache.lucene.search.FieldExistsQuery;
@@ -766,7 +767,10 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
             assertEquals(2L, result.getBuckets().get(1).getDocCount());
             assertEquals(2L, result.getBuckets().get(1).getDocCount());
             assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString());
             assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString());
             assertEquals(1L, result.getBuckets().get(2).getDocCount());
             assertEquals(1L, result.getBuckets().get(2).getDocCount());
-        }, new AggTestConfig(new CompositeAggregationBuilder("name", Collections.singletonList(terms)), FIELD_TYPES));
+        },
+            new AggTestConfig(new CompositeAggregationBuilder("name", Collections.singletonList(terms)), FIELD_TYPES)
+                .withLogDocMergePolicy()
+        );
     }
     }
 
 
     /**
     /**
@@ -819,7 +823,7 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
                 builder,
                 builder,
                 new KeywordFieldMapper.KeywordFieldType(nestedPath + "." + leafNameField),
                 new KeywordFieldMapper.KeywordFieldType(nestedPath + "." + leafNameField),
                 new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.LONG)
                 new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.LONG)
-            )
+            ).withLogDocMergePolicy()
         );
         );
     }
     }
 
 
@@ -874,7 +878,7 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
                 builder,
                 builder,
                 new KeywordFieldMapper.KeywordFieldType(nestedPath + "." + leafNameField),
                 new KeywordFieldMapper.KeywordFieldType(nestedPath + "." + leafNameField),
                 new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.LONG)
                 new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.LONG)
-            )
+            ).withLogDocMergePolicy()
         );
         );
     }
     }
 
 
@@ -3095,7 +3099,7 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
 
 
     public void testParentFactoryValidation() throws Exception {
     public void testParentFactoryValidation() throws Exception {
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
+            try (RandomIndexWriter indexWriter = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 Document document = new Document();
                 Document document = new Document();
                 document.clear();
                 document.clear();
                 addToDocument(0, document, createDocument("term-field", "a", "long", 100L));
                 addToDocument(0, document, createDocument("term-field", "a", "long", 100L));
@@ -3650,6 +3654,9 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
             }
             }
             if (forceMerge == false) {
             if (forceMerge == false) {
                 config.setMergePolicy(NoMergePolicy.INSTANCE);
                 config.setMergePolicy(NoMergePolicy.INSTANCE);
+            } else {
+                // Use LogDocMergePolicy to avoid randomization issues with the doc retrieval order.
+                config.setMergePolicy(new LogDocMergePolicy());
             }
             }
             try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) {
             try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) {
                 Document document = new Document();
                 Document document = new Document();

+ 10 - 16
server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java

@@ -25,7 +25,6 @@ import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.tests.analysis.MockAnalyzer;
 import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.lucene.search.Queries;
@@ -137,14 +136,9 @@ public class NestedAggregatorTests extends AggregatorTestCase {
         return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS, () -> 1L);
         return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS, () -> 1L);
     }
     }
 
 
-    private static RandomIndexWriter newRandomIndexWriter(Directory directory) throws IOException {
-        final IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy());
-        return new RandomIndexWriter(random(), directory, conf);
-    }
-
     public void testNoDocs() throws IOException {
     public void testNoDocs() throws IOException {
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 // intentionally not writing any docs
                 // intentionally not writing any docs
             }
             }
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
@@ -171,7 +165,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
         int expectedNestedDocs = 0;
         int expectedNestedDocs = 0;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 for (int i = 0; i < numRootDocs; i++) {
                 for (int i = 0; i < numRootDocs; i++) {
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     int numNestedDocs = randomIntBetween(0, 20);
                     int numNestedDocs = randomIntBetween(0, 20);
@@ -220,7 +214,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
         int expectedNestedDocs = 0;
         int expectedNestedDocs = 0;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 for (int i = 0; i < numRootDocs; i++) {
                 for (int i = 0; i < numRootDocs; i++) {
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     int numNestedDocs = randomIntBetween(0, 20);
                     int numNestedDocs = randomIntBetween(0, 20);
@@ -270,7 +264,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
         int expectedNestedDocs = 0;
         int expectedNestedDocs = 0;
         double expectedSum = 0;
         double expectedSum = 0;
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 for (int i = 0; i < numRootDocs; i++) {
                 for (int i = 0; i < numRootDocs; i++) {
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     int numNestedDocs = randomIntBetween(0, 20);
                     int numNestedDocs = randomIntBetween(0, 20);
@@ -394,7 +388,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
 
 
     public void testNestedOrdering() throws IOException {
     public void testNestedOrdering() throws IOException {
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 iw.addDocuments(generateBook("1", new String[] { "a" }, new int[] { 12, 13, 14 }));
                 iw.addDocuments(generateBook("1", new String[] { "a" }, new int[] { 12, 13, 14 }));
                 iw.addDocuments(generateBook("2", new String[] { "b" }, new int[] { 5, 50 }));
                 iw.addDocuments(generateBook("2", new String[] { "b" }, new int[] { 5, 50 }));
                 iw.addDocuments(generateBook("3", new String[] { "c" }, new int[] { 39, 19 }));
                 iw.addDocuments(generateBook("3", new String[] { "c" }, new int[] { 39, 19 }));
@@ -521,7 +515,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
             books.add(Tuple.tuple(Strings.format("%03d", i), chapters));
             books.add(Tuple.tuple(Strings.format("%03d", i), chapters));
         }
         }
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 int id = 0;
                 int id = 0;
                 for (Tuple<String, int[]> book : books) {
                 for (Tuple<String, int[]> book : books) {
                     iw.addDocuments(generateBook(Strings.format("%03d", id), new String[] { book.v1() }, book.v2()));
                     iw.addDocuments(generateBook(Strings.format("%03d", id), new String[] { book.v1() }, book.v2()));
@@ -571,7 +565,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
 
 
     public void testPreGetChildLeafCollectors() throws IOException {
     public void testPreGetChildLeafCollectors() throws IOException {
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 List<Iterable<IndexableField>> documents = new ArrayList<>();
                 List<Iterable<IndexableField>> documents = new ArrayList<>();
                 LuceneDocument document = new LuceneDocument();
                 LuceneDocument document = new LuceneDocument();
                 document.add(new StringField(IdFieldMapper.NAME, Uid.encodeId("1"), Field.Store.NO));
                 document.add(new StringField(IdFieldMapper.NAME, Uid.encodeId("1"), Field.Store.NO));
@@ -689,7 +683,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
         MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(VALUE_FIELD_NAME, NumberFieldMapper.NumberType.LONG);
         MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(VALUE_FIELD_NAME, NumberFieldMapper.NumberType.LONG);
 
 
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 for (int i = 0; i < numRootDocs; i++) {
                 for (int i = 0; i < numRootDocs; i++) {
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     int numNestedDocs = randomIntBetween(0, 20);
                     int numNestedDocs = randomIntBetween(0, 20);
@@ -730,7 +724,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
         int expectedNestedDocs = 0;
         int expectedNestedDocs = 0;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 for (int i = 0; i < numRootDocs; i++) {
                 for (int i = 0; i < numRootDocs; i++) {
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     expectedMaxValue = Math.max(expectedMaxValue, generateMaxDocs(documents, 1, i, NESTED_OBJECT, VALUE_FIELD_NAME));
                     expectedMaxValue = Math.max(expectedMaxValue, generateMaxDocs(documents, 1, i, NESTED_OBJECT, VALUE_FIELD_NAME));
@@ -796,7 +790,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                 )
                 )
             );
             );
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 buildResellerData(numProducts, numResellers).accept(iw);
                 buildResellerData(numProducts, numResellers).accept(iw);
             }
             }
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {

+ 4 - 12
server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java

@@ -13,11 +13,8 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.LogDocMergePolicy;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.tests.analysis.MockAnalyzer;
 import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.LuceneDocument;
 import org.elasticsearch.index.mapper.LuceneDocument;
@@ -62,14 +59,9 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
         return wrapInMockESDirectoryReader(reader);
         return wrapInMockESDirectoryReader(reader);
     }
     }
 
 
-    private static RandomIndexWriter newRandomIndexWriter(Directory directory) throws IOException {
-        final IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy());
-        return new RandomIndexWriter(random(), directory, conf);
-    }
-
     public void testNoDocs() throws IOException {
     public void testNoDocs() throws IOException {
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 // intentionally not writing any docs
                 // intentionally not writing any docs
             }
             }
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
@@ -98,7 +90,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
         int expectedNestedDocs = 0;
         int expectedNestedDocs = 0;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         double expectedMaxValue = Double.NEGATIVE_INFINITY;
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 for (int i = 0; i < numParentDocs; i++) {
                 for (int i = 0; i < numParentDocs; i++) {
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     int numNestedDocs = randomIntBetween(0, 20);
                     int numNestedDocs = randomIntBetween(0, 20);
@@ -154,7 +146,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
         MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(VALUE_FIELD_NAME, NumberFieldMapper.NumberType.LONG);
         MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(VALUE_FIELD_NAME, NumberFieldMapper.NumberType.LONG);
 
 
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 for (int i = 0; i < numParentDocs; i++) {
                 for (int i = 0; i < numParentDocs; i++) {
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     List<Iterable<IndexableField>> documents = new ArrayList<>();
                     int numNestedDocs = randomIntBetween(0, 20);
                     int numNestedDocs = randomIntBetween(0, 20);
@@ -219,7 +211,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
         );
         );
 
 
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
-            try (RandomIndexWriter iw = newRandomIndexWriter(directory)) {
+            try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
                 NestedAggregatorTests.buildResellerData(numProducts, numResellers).accept(iw);
                 NestedAggregatorTests.buildResellerData(numProducts, numResellers).accept(iw);
             }
             }
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
             try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {

+ 37 - 1
test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java

@@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.LogDocMergePolicy;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedDocValues;
@@ -487,6 +488,17 @@ public abstract class AggregatorTestCase extends ESTestCase {
         return null;
         return null;
     }
     }
 
 
+    /**
+     * Create a RandomIndexWriter that uses the LogDocMergePolicy.
+     *
+     * The latest lucene upgrade adds a new merge policy that reverses the order of the documents and it is not compatible with some
+     * aggregation types. This writer avoids randomization by hardcoding the merge policy to LogDocMergePolicy.
+     */
+    protected static RandomIndexWriter newRandomIndexWriterWithLogDocMergePolicy(Directory directory) throws IOException {
+        final IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy());
+        return new RandomIndexWriter(random(), directory, conf);
+    }
+
     /**
     /**
      * Collects all documents that match the provided query {@link Query} and
      * Collects all documents that match the provided query {@link Query} and
      * returns the reduced {@link InternalAggregation}.
      * returns the reduced {@link InternalAggregation}.
@@ -713,6 +725,10 @@ public abstract class AggregatorTestCase extends ESTestCase {
         boolean timeSeries = aggTestConfig.builder().isInSortOrderExecutionRequired();
         boolean timeSeries = aggTestConfig.builder().isInSortOrderExecutionRequired();
         try (Directory directory = newDirectory()) {
         try (Directory directory = newDirectory()) {
             IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random()));
             IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random()));
+            if (aggTestConfig.useLogDocMergePolicy()) {
+                // Use LogDocMergePolicy to avoid randomization issues with the doc retrieval order for nested aggs.
+                config.setMergePolicy(new LogDocMergePolicy());
+            }
             if (timeSeries) {
             if (timeSeries) {
                 Sort sort = new Sort(
                 Sort sort = new Sort(
                     new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false),
                     new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false),
@@ -1524,11 +1540,13 @@ public abstract class AggregatorTestCase extends ESTestCase {
         boolean splitLeavesIntoSeparateAggregators,
         boolean splitLeavesIntoSeparateAggregators,
         boolean shouldBeCached,
         boolean shouldBeCached,
         boolean incrementalReduce,
         boolean incrementalReduce,
+
+        boolean useLogDocMergePolicy,
         MappedFieldType... fieldTypes
         MappedFieldType... fieldTypes
     ) {
     ) {
 
 
         public AggTestConfig(AggregationBuilder builder, MappedFieldType... fieldTypes) {
         public AggTestConfig(AggregationBuilder builder, MappedFieldType... fieldTypes) {
-            this(new MatchAllDocsQuery(), builder, DEFAULT_MAX_BUCKETS, randomBoolean(), true, randomBoolean(), fieldTypes);
+            this(new MatchAllDocsQuery(), builder, DEFAULT_MAX_BUCKETS, randomBoolean(), true, randomBoolean(), false, fieldTypes);
         }
         }
 
 
         public AggTestConfig withQuery(Query query) {
         public AggTestConfig withQuery(Query query) {
@@ -1539,6 +1557,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
                 splitLeavesIntoSeparateAggregators,
                 splitLeavesIntoSeparateAggregators,
                 shouldBeCached,
                 shouldBeCached,
                 incrementalReduce,
                 incrementalReduce,
+                useLogDocMergePolicy,
                 fieldTypes
                 fieldTypes
             );
             );
         }
         }
@@ -1551,6 +1570,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
                 splitLeavesIntoSeparateAggregators,
                 splitLeavesIntoSeparateAggregators,
                 shouldBeCached,
                 shouldBeCached,
                 incrementalReduce,
                 incrementalReduce,
+                useLogDocMergePolicy,
                 fieldTypes
                 fieldTypes
             );
             );
         }
         }
@@ -1563,6 +1583,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
                 splitLeavesIntoSeparateAggregators,
                 splitLeavesIntoSeparateAggregators,
                 shouldBeCached,
                 shouldBeCached,
                 incrementalReduce,
                 incrementalReduce,
+                useLogDocMergePolicy,
                 fieldTypes
                 fieldTypes
             );
             );
         }
         }
@@ -1575,6 +1596,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
                 splitLeavesIntoSeparateAggregators,
                 splitLeavesIntoSeparateAggregators,
                 shouldBeCached,
                 shouldBeCached,
                 incrementalReduce,
                 incrementalReduce,
+                useLogDocMergePolicy,
                 fieldTypes
                 fieldTypes
             );
             );
         }
         }
@@ -1587,6 +1609,20 @@ public abstract class AggregatorTestCase extends ESTestCase {
                 splitLeavesIntoSeparateAggregators,
                 splitLeavesIntoSeparateAggregators,
                 shouldBeCached,
                 shouldBeCached,
                 incrementalReduce,
                 incrementalReduce,
+                useLogDocMergePolicy,
+                fieldTypes
+            );
+        }
+
+        public AggTestConfig withLogDocMergePolicy() {
+            return new AggTestConfig(
+                query,
+                builder,
+                maxBuckets,
+                splitLeavesIntoSeparateAggregators,
+                shouldBeCached,
+                incrementalReduce,
+                true,
                 fieldTypes
                 fieldTypes
             );
             );
         }
         }