Browse Source

Add multi_terms aggs (#67597)

Adds a multi_terms aggregation support. The multi terms aggregation works
very similarly to the terms aggregation but supports multiple terms. The goal
of this PR is to add the basic functionality so it is not optimized at the
moment. It will be done in follow up PRs.

Closes #65623
Igor Motov 4 years ago
parent
commit
9e3384ebc9
34 changed files with 3593 additions and 56 deletions
  1. 2 0
      docs/reference/aggregations/bucket.asciidoc
  2. 425 0
      docs/reference/aggregations/bucket/multi-terms-aggregation.asciidoc
  3. 9 3
      docs/reference/aggregations/bucket/terms-aggregation.asciidoc
  4. 2 1
      docs/reference/rest-api/usage.asciidoc
  5. 4 0
      server/src/main/java/org/elasticsearch/search/DocValueFormat.java
  6. 25 2
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java
  7. 1 1
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
  8. 2 17
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
  9. 1 1
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
  10. 1 1
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
  11. 14 5
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
  12. 1 1
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java
  13. 2 2
      server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java
  14. 1 1
      server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java
  15. 82 6
      server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java
  16. 14 3
      server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParseHelper.java
  17. 14 3
      server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java
  18. 1 1
      test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
  19. 9 1
      x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java
  20. 616 0
      x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java
  21. 430 0
      x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java
  22. 70 0
      x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationFactory.java
  23. 495 0
      x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java
  24. 1 1
      x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java
  25. 2 2
      x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregationBuilder.java
  26. 1 0
      x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsStatsActionNodeResponseTests.java
  27. 332 0
      x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTermsTests.java
  28. 128 0
      x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java
  29. 667 0
      x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregatorTests.java
  30. 2 1
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java
  31. 2 2
      x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregationBuilder.java
  32. 211 0
      x-pack/plugin/src/test/resources/rest-api-spec/test/analytics/multi_terms.yml
  33. 24 0
      x-pack/plugin/src/test/resources/rest-api-spec/test/analytics/usage.yml
  34. 2 1
      x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/TransformAggregations.java

+ 2 - 0
docs/reference/aggregations/bucket.asciidoc

@@ -50,6 +50,8 @@ include::bucket/iprange-aggregation.asciidoc[]
 
 include::bucket/missing-aggregation.asciidoc[]
 
+include::bucket/multi-terms-aggregation.asciidoc[]
+
 include::bucket/nested-aggregation.asciidoc[]
 
 include::bucket/parent-aggregation.asciidoc[]

+ 425 - 0
docs/reference/aggregations/bucket/multi-terms-aggregation.asciidoc

@@ -0,0 +1,425 @@
+[role="xpack"]
+[testenv="basic"]
+[[search-aggregations-bucket-multi-terms-aggregation]]
+=== Multi Terms aggregation
+++++
+<titleabbrev>Multi Terms</titleabbrev>
+++++
+
+A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. The multi terms
+aggregation is very similar to the <<search-aggregations-bucket-terms-aggregation-order,`terms aggregation`>>, however in most cases
+it will be slower than the terms aggregation and will consume more memory. Therefore, if the same set of fields is constantly used,
+it would be more efficient to index a combined key for these fields as a separate field and use the terms aggregation on this field.
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /products
+{
+  "mappings": {
+    "properties": {
+      "genre": {
+        "type": "keyword"
+      },
+      "product": {
+        "type": "keyword"
+      },
+      "quantity": {
+        "type": "integer"
+      }
+    }
+  }
+}
+
+POST /products/_bulk?refresh
+{"index":{"_id":0}}
+{"genre": "rock", "product": "Product A", "quantity": 4}
+{"index":{"_id":1}}
+{"genre": "rock", "product": "Product A", "quantity": 5}
+{"index":{"_id":2}}
+{"genre": "rock", "product": "Product B", "quantity": 1}
+{"index":{"_id":3}}
+{"genre": "jazz", "product": "Product B", "quantity": 10}
+{"index":{"_id":4}}
+{"genre": "electronic", "product": "Product B", "quantity": 3}
+{"index":{"_id":5}}
+{"genre": "electronic"}
+
+--------------------------------------------------
+// NOTCONSOLE
+// TESTSETUP
+
+//////////////////////////
+
+Example:
+
+[source,console,id=multi-terms-aggregation-example]
+--------------------------------------------------
+GET /products/_search
+{
+  "aggs": {
+    "genres_and_products": {
+      "multi_terms": {
+        "terms": [{
+          "field": "genre" <1>
+        }, {
+          "field": "product"
+        }]
+      }
+    }
+  }
+}
+--------------------------------------------------
+// TEST[s/_search/_search\?filter_path=aggregations/]
+
+<1> `multi_terms` aggregation can work with the same field types as a
+<<search-aggregations-bucket-terms-aggregation-order,`terms aggregation`>> and supports most of the terms aggregation parameters.
+
+Response:
+
+[source,console-result]
+--------------------------------------------------
+{
+  ...
+  "aggregations" : {
+    "genres_and_products" : {
+      "doc_count_error_upper_bound" : 0,  <1>
+      "sum_other_doc_count" : 0,          <2>
+      "buckets" : [                       <3>
+        {
+          "key" : [                       <4>
+            "rock",
+            "Product A"
+          ],
+          "key_as_string" : "rock|Product A",
+          "doc_count" : 2
+        },
+        {
+          "key" : [
+            "electronic",
+            "Product B"
+          ],
+          "key_as_string" : "electronic|Product B",
+          "doc_count" : 1
+        },
+        {
+          "key" : [
+            "jazz",
+            "Product B"
+          ],
+          "key_as_string" : "jazz|Product B",
+          "doc_count" : 1
+        },
+        {
+          "key" : [
+            "rock",
+            "Product B"
+          ],
+          "key_as_string" : "rock|Product B",
+          "doc_count" : 1
+        }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/\.\.\.//]
+
+<1> an upper bound of the error on the document counts for each term, see <<search-aggregations-bucket-multi-terms-aggregation-approximate-counts,below>>
+<2> when there are lots of unique terms, Elasticsearch only returns the top terms; this number is the sum of the document counts for all buckets that are not part of the response
+<3> the list of the top buckets.
+<4> the keys are arrays of values ordered the same ways as expression in the `terms` parameter of the aggregation
+
+By default, the `multi_terms` aggregation will return the buckets for the top ten terms ordered by the `doc_count`. One can
+change this default behaviour by setting the `size` parameter.
+
+[[search-aggregations-bucket-multi-terms-aggregation-parameters]]
+==== Aggregation Parameters
+
+The following parameters are supported. See <<search-aggregations-bucket-terms-aggregation-order,`terms aggregation`>> for more detailed
+explanation of these parameters.
+
+[horizontal]
+size::                        Optional. Defines how many term buckets should be returned out of the overall terms list. Defaults to 10.
+
+shard_size::                  Optional. The higher the requested `size` is, the more accurate the results will be, but also, the more
+                              expensive it will be to compute the final results. The default `shard_size` is `(size * 1.5 + 10)`.
+
+show_term_doc_count_error::   Optional. Calculates the doc count error on per term basis. Defaults to `false`
+
+order::                       Optional. Specifies the order of the buckets. Defaults to the number of documents per bucket. The bucket terms
+                              value is used as a tiebreaker for buckets with the same document count.
+
+min_doc_count::               Optional. The minimal number of documents in a bucket for it to be returned. Defaults to 1.
+
+shard_min_doc_count::         Optional. The minimal number of documents in a bucket on each shard for it to be returned. Defaults to
+                              `min_doc_count`.
+
+collect_mode::                Optional. Specifies the strategy for data collection. The `depth_first` or `breadth_first` modes are
+                              supported. Defaults to `breadth_first`.
+
+
+[[search-aggregations-bucket-multi-terms-aggregation-script]]
+==== Script
+
+Generating the terms using a script:
+
+[source,console,id=multi-terms-aggregation-script-example]
+--------------------------------------------------
+GET /products/_search
+{
+  "aggs": {
+    "genres_and_products": {
+      "multi_terms": {
+        "terms": [
+          {
+            "script": {
+              "source": "doc['genre'].value.length()",
+              "lang": "painless"
+            }
+          },
+          {
+            "field": "product"
+          }
+        ]
+      }
+    }
+  }
+}
+--------------------------------------------------
+// TEST[s/_search/_search\?filter_path=aggregations/]
+
+Response:
+
+[source,console-result]
+--------------------------------------------------
+{
+  ...
+  "aggregations" : {
+    "genres_and_products" : {
+      "doc_count_error_upper_bound" : 0,
+      "sum_other_doc_count" : 0,
+      "buckets" : [
+        {
+          "key" : [
+            "4",
+            "Product A"
+          ],
+          "key_as_string" : "4|Product A",
+          "doc_count" : 2
+        },
+        {
+          "key" : [
+            "4",
+            "Product B"
+          ],
+          "key_as_string" : "4|Product B",
+          "doc_count" : 2
+        },
+        {
+          "key" : [
+            "10",
+            "Product B"
+          ],
+          "key_as_string" : "10|Product B",
+          "doc_count" : 1
+        }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/\.\.\.//]
+
+==== Missing value
+
+The `missing` parameter defines how documents that are missing a value should be treated.
+By default, if any of the key components are missing, the entire document will be ignored,
+but it is also possible to treat them as if they had a value by using the `missing` parameter.
+
+[source,console,id=multi-terms-aggregation-missing-example]
+--------------------------------------------------
+GET /products/_search
+{
+  "aggs": {
+    "genres_and_products": {
+      "multi_terms": {
+        "terms": [
+          {
+            "field": "genre"
+          },
+          {
+            "field": "product",
+            "missing": "Product Z"
+          }
+        ]
+      }
+    }
+  }
+}
+--------------------------------------------------
+// TEST[s/_search/_search\?filter_path=aggregations/]
+
+Response:
+
+[source,console-result]
+--------------------------------------------------
+{
+   ...
+   "aggregations" : {
+    "genres_and_products" : {
+      "doc_count_error_upper_bound" : 0,
+      "sum_other_doc_count" : 0,
+      "buckets" : [
+        {
+          "key" : [
+            "rock",
+            "Product A"
+          ],
+          "key_as_string" : "rock|Product A",
+          "doc_count" : 2
+        },
+        {
+          "key" : [
+            "electronic",
+            "Product B"
+          ],
+          "key_as_string" : "electronic|Product B",
+          "doc_count" : 1
+        },
+        {
+          "key" : [
+            "electronic",
+            "Product Z"
+          ],
+          "key_as_string" : "electronic|Product Z",  <1>
+          "doc_count" : 1
+        },
+        {
+          "key" : [
+            "jazz",
+            "Product B"
+          ],
+          "key_as_string" : "jazz|Product B",
+          "doc_count" : 1
+        },
+        {
+          "key" : [
+            "rock",
+            "Product B"
+          ],
+          "key_as_string" : "rock|Product B",
+          "doc_count" : 1
+        }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/\.\.\.//]
+
+<1> Documents without a value in the `product` field will fall into the same bucket as documents that have the value `Product Z`.
+
+==== Mixing field types
+
+WARNING: When aggregating on multiple indices the type of the aggregated field may not be the same in all indices.
+Some types are compatible with each other (`integer` and `long` or `float` and `double`) but when the types are a mix
+of decimal and non-decimal number the terms aggregation will promote the non-decimal numbers to decimal numbers.
+This can result in a loss of precision in the bucket values.
+
+==== Sub aggregation and sorting examples
+
+As with most bucket aggregations, the `multi_terms` aggregation supports sub-aggregations and ordering the buckets by metrics sub-aggregations:
+
+[source,console,id=multi-terms-aggregation-subaggregation-example]
+--------------------------------------------------
+GET /products/_search
+{
+  "aggs": {
+    "genres_and_products": {
+      "multi_terms": {
+        "terms": [
+          {
+            "field": "genre"
+          },
+          {
+            "field": "product"
+          }
+        ],
+        "order": {
+          "total_quantity": "desc"
+        }
+      },
+      "aggs": {
+        "total_quantity": {
+          "sum": {
+            "field": "quantity"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// TEST[s/_search/_search\?filter_path=aggregations/]
+
+[source,console-result]
+--------------------------------------------------
+{
+  ...
+  "aggregations" : {
+    "genres_and_products" : {
+      "doc_count_error_upper_bound" : 0,
+      "sum_other_doc_count" : 0,
+      "buckets" : [
+        {
+          "key" : [
+            "jazz",
+            "Product B"
+          ],
+          "key_as_string" : "jazz|Product B",
+          "doc_count" : 1,
+          "total_quantity" : {
+            "value" : 10.0
+          }
+        },
+        {
+          "key" : [
+            "rock",
+            "Product A"
+          ],
+          "key_as_string" : "rock|Product A",
+          "doc_count" : 2,
+          "total_quantity" : {
+            "value" : 9.0
+          }
+        },
+        {
+          "key" : [
+            "electronic",
+            "Product B"
+          ],
+          "key_as_string" : "electronic|Product B",
+          "doc_count" : 1,
+          "total_quantity" : {
+            "value" : 3.0
+          }
+        },
+        {
+          "key" : [
+            "rock",
+            "Product B"
+          ],
+          "key_as_string" : "rock|Product B",
+          "doc_count" : 1,
+          "total_quantity" : {
+            "value" : 1.0
+          }
+        }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/\.\.\.//]

+ 9 - 3
docs/reference/aggregations/bucket/terms-aggregation.asciidoc

@@ -346,10 +346,10 @@ GET /_search
         "order": [ { "rock>playback_stats.avg": "desc" }, { "_count": "desc" } ]
       },
       "aggs": {
-        "rock": { 
+        "rock": {
           "filter": { "term": { "genre": "rock" } },
           "aggs": {
-            "playback_stats": { "stats": { "field": "play_count" } } 
+            "playback_stats": { "stats": { "field": "play_count" } }
           }
         }
       }
@@ -614,7 +614,7 @@ to produce a list of all of the unique values in the field.  Global ordinals
 results in an important performance boost which would not be possible across
 multiple fields.
 
-There are two approaches that you can use to perform a `terms` agg across
+There are three approaches that you can use to perform a `terms` agg across
 multiple fields:
 
 <<search-aggregations-bucket-terms-aggregation-script,Script>>::
@@ -631,6 +631,12 @@ fields, then use `copy_to` in your mapping to create a new dedicated field at
 index time which contains the values from both fields.  You can aggregate on
 this single field, which will benefit from the global ordinals optimization.
 
+<<search-aggregations-bucket-multi-terms-aggregation, `multi_terms` aggregation>>::
+
+Use multi_terms aggregation to combine terms from multiple fields into a compound key. This
+also disables the global ordinals and will be slower than collecting terms from a single field.
+It is faster but less flexible than using a script.
+
 [[search-aggregations-bucket-terms-aggregation-collect]]
 ==== Collect mode
 

+ 2 - 1
docs/reference/rest-api/usage.asciidoc

@@ -288,7 +288,8 @@ GET /_xpack/usage
       "t_test_usage" : 0,
       "rate_usage" : 0,
       "string_stats_usage" : 0,
-      "moving_percentiles_usage" : 0
+      "moving_percentiles_usage" : 0,
+      "multi_terms_usage" : 0
     }
   },
   "data_streams" : {

+ 4 - 0
server/src/main/java/org/elasticsearch/search/DocValueFormat.java

@@ -464,6 +464,10 @@ public interface DocValueFormat extends NamedWriteable {
         public int hashCode() {
             return Objects.hash(pattern);
         }
+
+        @Override public String toString() {
+            return pattern;
+        }
     };
 
     /**

+ 25 - 2
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java

@@ -11,6 +11,7 @@ package org.elasticsearch.search.aggregations.bucket.terms;
 import org.apache.lucene.util.PriorityQueue;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
@@ -29,6 +30,8 @@ import java.util.Map;
 
 import static org.elasticsearch.search.aggregations.InternalOrder.isKeyAsc;
 import static org.elasticsearch.search.aggregations.InternalOrder.isKeyOrder;
+import static org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME;
+import static org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.SUM_OF_OTHER_DOC_COUNTS;
 
 /**
  * Base class for terms and multi_terms aggregation that handles common reduce logic
@@ -85,7 +88,7 @@ public abstract class AbstractInternalTerms<
 
     protected abstract int getRequiredSize();
 
-    abstract B createBucket(long docCount, InternalAggregations aggs, long docCountError, B prototype);
+    protected abstract B createBucket(long docCount, InternalAggregations aggs, long docCountError, B prototype);
 
     @Override
     public B reduceBucket(List<B> buckets, ReduceContext context) {
@@ -182,7 +185,12 @@ public abstract class AbstractInternalTerms<
             currentBuckets.add(top.current());
             if (top.hasNext()) {
                 top.next();
-                assert cmp.compare(top.current(), lastBucket) > 0 : "shards must return data sorted by key";
+                /*
+                 * Typically the bucket keys are strictly increasing, but when we merge aggs from two different indices
+                 * we can promote long and unsigned long keys to double, which can cause 2 long keys to be promoted into
+                 * the same double key.
+                 */
+                assert cmp.compare(top.current(), lastBucket) >= 0 : "shards must return data sorted by key";
                 pq.updateTop();
             } else {
                 pq.pop();
@@ -322,4 +330,19 @@ public abstract class AbstractInternalTerms<
             otherDocCount);
     }
 
+    protected static XContentBuilder doXContentCommon(XContentBuilder builder,
+                                                      Params params,
+                                                      long docCountError,
+                                                      long otherDocCount,
+                                                      List<? extends AbstractTermsBucket> buckets) throws IOException {
+        builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), docCountError);
+        builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), otherDocCount);
+        builder.startArray(CommonFields.BUCKETS.getPreferredName());
+        for (AbstractTermsBucket bucket : buckets) {
+            bucket.toXContent(builder, params);
+        }
+        builder.endArray();
+        return builder;
+    }
+
 }

+ 1 - 1
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java

@@ -161,7 +161,7 @@ public class DoubleTerms extends InternalMappedTerms<DoubleTerms, DoubleTerms.Bu
     }
 
     @Override
-    Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, DoubleTerms.Bucket prototype) {
+    protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, DoubleTerms.Bucket prototype) {
         return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format);
     }
 }

+ 2 - 17
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java

@@ -28,8 +28,8 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
     extends AbstractInternalTerms<A, B> implements Terms {
 
 
-    protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound");
-    protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count");
+    public static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound");
+    public static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count");
 
     public abstract static class Bucket<B extends Bucket<B>> extends AbstractTermsBucket
         implements Terms.Bucket, KeyComparable<B> {
@@ -256,19 +256,4 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
     public int hashCode() {
         return Objects.hash(super.hashCode(), minDocCount, reduceOrder, order, requiredSize);
     }
-
-    protected static XContentBuilder doXContentCommon(XContentBuilder builder,
-                                                      Params params,
-                                                      long docCountError,
-                                                      long otherDocCount,
-                                                      List<? extends Bucket<?>> buckets) throws IOException {
-        builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), docCountError);
-        builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), otherDocCount);
-        builder.startArray(CommonFields.BUCKETS.getPreferredName());
-        for (Bucket<?> bucket : buckets) {
-            bucket.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
-    }
 }

+ 1 - 1
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java

@@ -175,7 +175,7 @@ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket>
     }
 
     @Override
-    Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, LongTerms.Bucket prototype) {
+    protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, LongTerms.Bucket prototype) {
         return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format);
     }
 

+ 1 - 1
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java

@@ -124,7 +124,7 @@ public class StringTerms extends InternalMappedTerms<StringTerms, StringTerms.Bu
     }
 
     @Override
-    Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, StringTerms.Bucket prototype) {
+    protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, StringTerms.Bucket prototype) {
         return new Bucket(prototype.termBytes, docCount, aggs, prototype.showDocCountError, docCountError, format);
     }
 

+ 14 - 5
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java

@@ -162,7 +162,7 @@ public abstract class TermsAggregator extends DeferableBucketAggregator {
     protected final BucketCountThresholds bucketCountThresholds;
     protected final BucketOrder order;
     protected final Comparator<InternalTerms.Bucket<?>> partiallyBuiltBucketComparator;
-    protected final Set<Aggregator> aggsUsedForSorting = new HashSet<>();
+    protected final Set<Aggregator> aggsUsedForSorting;
     protected final SubAggCollectionMode collectMode;
 
     public TermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent,
@@ -183,22 +183,31 @@ public abstract class TermsAggregator extends DeferableBucketAggregator {
         } else {
             this.collectMode = collectMode;
         }
+        aggsUsedForSorting = aggsUsedForSorting(this, order);
+    }
+
+    /**
+     * Walks through bucket order and extracts all aggregations used for sorting
+     */
+    public static Set<Aggregator> aggsUsedForSorting(Aggregator root, BucketOrder order) {
+        Set<Aggregator> aggsUsedForSorting = new HashSet<>();
         // Don't defer any child agg if we are dependent on it for pruning results
-        if (order instanceof Aggregation){
+        if (order instanceof Aggregation) {
             AggregationPath path = ((Aggregation) order).path();
-            aggsUsedForSorting.add(path.resolveTopmostAggregator(this));
+            aggsUsedForSorting.add(path.resolveTopmostAggregator(root));
         } else if (order instanceof CompoundOrder) {
             CompoundOrder compoundOrder = (CompoundOrder) order;
             for (BucketOrder orderElement : compoundOrder.orderElements()) {
                 if (orderElement instanceof Aggregation) {
                     AggregationPath path = ((Aggregation) orderElement).path();
-                    aggsUsedForSorting.add(path.resolveTopmostAggregator(this));
+                    aggsUsedForSorting.add(path.resolveTopmostAggregator(root));
                 }
             }
         }
+        return aggsUsedForSorting;
     }
 
-    static boolean descendsFromNestedAggregator(Aggregator parent) {
+    public static boolean descendsFromNestedAggregator(Aggregator parent) {
         while (parent != null) {
             if (parent.getClass() == NestedAggregator.class) {
                 return true;

+ 1 - 1
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java

@@ -76,7 +76,7 @@ public class UnmappedTerms extends InternalTerms<UnmappedTerms, UnmappedTerms.Bu
     }
 
     @Override
-    Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, Bucket prototype) {
+    protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, Bucket prototype) {
         throw new UnsupportedOperationException("not supported for UnmappedTerms");
     }
 

+ 2 - 2
server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java

@@ -43,8 +43,8 @@ public class WeightedAvgAggregationBuilder extends MultiValuesSourceAggregationB
             ObjectParser.fromBuilder(NAME, WeightedAvgAggregationBuilder::new);
     static {
         MultiValuesSourceParseHelper.declareCommon(PARSER, true, ValueType.NUMERIC);
-        MultiValuesSourceParseHelper.declareField(VALUE_FIELD.getPreferredName(), PARSER, true, false, false);
-        MultiValuesSourceParseHelper.declareField(WEIGHT_FIELD.getPreferredName(), PARSER, true, false, false);
+        MultiValuesSourceParseHelper.declareField(VALUE_FIELD.getPreferredName(), PARSER, true, false, false, false);
+        MultiValuesSourceParseHelper.declareField(WEIGHT_FIELD.getPreferredName(), PARSER, true, false, false, false);
     }
 
     public static void registerUsage(ValuesSourceRegistry.Builder builder) {

+ 1 - 1
server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java

@@ -171,7 +171,7 @@ public abstract class MultiValuesSourceAggregationBuilder<AB extends MultiValues
     }
 
 
-    private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType,
+    public static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType,
                                                 ValuesSourceType defaultValuesSourceType) {
         if (valueType == null) {
             // If the user didn't send a hint, all we can do is fall back to the default

+ 82 - 6
server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java

@@ -21,6 +21,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.AbstractQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 
 import java.io.IOException;
 import java.time.ZoneId;
@@ -31,15 +32,31 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
     private final String fieldName;
     private final Object missing;
     private final Script script;
+    // supported only if timezoneAware == true
     private final ZoneId timeZone;
+    // supported only if filtered == true
     private final QueryBuilder filter;
+    // supported only if heterogeneous == true
+    private final ValueType userValueTypeHint;
+    private final String format;
 
     private static final String NAME = "field_config";
 
     public static final ParseField FILTER = new ParseField("filter");
 
-    public static <C> ObjectParser<MultiValuesSourceFieldConfig.Builder, C> parserBuilder(boolean scriptable, boolean timezoneAware,
-                                                                                          boolean filtered) {
+    /**
+     * Creates a parser capable of parsing value sources in different context
+     * @param scriptable - allows specifying script in addition to a field as a values source
+     * @param timezoneAware - allows specifying timezone
+     * @param filtered - allows specifying filters on the values
+     * @param heterogeneous - allows specifying value-source specific format and user value type hint
+     * @param <C> - parser context
+     * @return configured parser
+     */
+    public static <C> ObjectParser<MultiValuesSourceFieldConfig.Builder, C> parserBuilder(boolean scriptable,
+                                                                                                    boolean timezoneAware,
+                                                                                                    boolean filtered,
+                                                                                                    boolean heterogeneous) {
 
         ObjectParser<MultiValuesSourceFieldConfig.Builder, C> parser
             = new ObjectParser<>(MultiValuesSourceFieldConfig.NAME, MultiValuesSourceFieldConfig.Builder::new);
@@ -69,15 +86,26 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
                 (p, context) -> AbstractQueryBuilder.parseInnerQueryBuilder(p),
                 FILTER, ObjectParser.ValueType.OBJECT);
         }
+
+        if (heterogeneous) {
+            parser.declareField(MultiValuesSourceFieldConfig.Builder::setUserValueTypeHint,
+                p -> ValueType.lenientParse(p.text()), ValueType.VALUE_TYPE, ObjectParser.ValueType.STRING);
+
+            parser.declareField(MultiValuesSourceFieldConfig.Builder::setFormat, XContentParser::text,
+                ParseField.CommonFields.FORMAT, ObjectParser.ValueType.STRING);
+        }
         return parser;
     };
 
-    protected MultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, ZoneId timeZone, QueryBuilder filter) {
+    protected MultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, ZoneId timeZone, QueryBuilder filter,
+                                           ValueType userValueTypeHint, String format) {
         this.fieldName = fieldName;
         this.missing = missing;
         this.script = script;
         this.timeZone = timeZone;
         this.filter = filter;
+        this.userValueTypeHint = userValueTypeHint;
+        this.format = format;
     }
 
     public MultiValuesSourceFieldConfig(StreamInput in) throws IOException {
@@ -94,6 +122,13 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
         } else {
             this.filter = null;
         }
+        if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
+            this.userValueTypeHint = in.readOptionalWriteable(ValueType::readFromStream);
+            this.format = in.readOptionalString();
+        } else {
+            this.userValueTypeHint = null;
+            this.format = null;
+        }
     }
 
     public Object getMissing() {
@@ -116,6 +151,15 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
         return filter;
     }
 
+    public ValueType getUserValueTypeHint() {
+        return userValueTypeHint;
+    }
+
+    public String getFormat() {
+        return format;
+    }
+
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         if (out.getVersion().onOrAfter(Version.V_7_6_0)) {
@@ -129,6 +173,10 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
         if (out.getVersion().onOrAfter(Version.V_7_8_0)) {
             out.writeOptionalNamedWriteable(filter);
         }
+        if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
+            out.writeOptionalWriteable(userValueTypeHint);
+            out.writeOptionalString(format);
+        }
     }
 
     @Override
@@ -150,6 +198,12 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
             builder.field(FILTER.getPreferredName());
             filter.toXContent(builder, params);
         }
+        if(userValueTypeHint != null) {
+            builder.field(AggregationBuilder.CommonFields.VALUE_TYPE.getPreferredName(), userValueTypeHint.getPreferredName());
+        }
+        if (format != null) {
+            builder.field(AggregationBuilder.CommonFields.FORMAT.getPreferredName(), format);
+        }
         builder.endObject();
         return builder;
     }
@@ -163,12 +217,14 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
             && Objects.equals(missing, that.missing)
             && Objects.equals(script, that.script)
             && Objects.equals(timeZone, that.timeZone)
-            && Objects.equals(filter, that.filter);
+            && Objects.equals(filter, that.filter)
+            && Objects.equals(userValueTypeHint, that.userValueTypeHint)
+            && Objects.equals(format, that.format);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(fieldName, missing, script, timeZone, filter);
+        return Objects.hash(fieldName, missing, script, timeZone, filter, userValueTypeHint, format);
     }
 
     @Override
@@ -182,6 +238,8 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
         private Script script = null;
         private ZoneId timeZone = null;
         private QueryBuilder filter = null;
+        private ValueType userValueTypeHint = null;
+        private String format = null;
 
         public String getFieldName() {
             return fieldName;
@@ -224,6 +282,24 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
             return this;
         }
 
+        public Builder setUserValueTypeHint(ValueType userValueTypeHint) {
+            this.userValueTypeHint = userValueTypeHint;
+            return this;
+        }
+
+        public ValueType getUserValueTypeHint() {
+            return userValueTypeHint;
+        }
+
+        public Builder setFormat(String format) {
+            this.format = format;
+            return this;
+        }
+
+        public String getFormat() {
+            return format;
+        }
+
         public MultiValuesSourceFieldConfig build() {
             if (Strings.isNullOrEmpty(fieldName) && script == null) {
                 throw new IllegalArgumentException("[" +  ParseField.CommonFields.FIELD.getPreferredName()
@@ -237,7 +313,7 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject
                     "Please specify one or the other.");
             }
 
-            return new MultiValuesSourceFieldConfig(fieldName, missing, script, timeZone, filter);
+            return new MultiValuesSourceFieldConfig(fieldName, missing, script, timeZone, filter, userValueTypeHint, format);
         }
     }
 }

+ 14 - 3
server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParseHelper.java

@@ -37,12 +37,23 @@ public final class MultiValuesSourceParseHelper {
         }
     }
 
-    public static <VS extends ValuesSource, T> void declareField(String fieldName,
+    /**
+     * Declares a field that contains information about a values source
+     *
+     * @param scriptable - allows specifying script in addition to a field as a values source
+     * @param timezoneAware - allows specifying timezone
+     * @param filterable - allows specifying filters on the values
+     * @param heterogeneous - allows specifying value-source specific format and user value type hint
+     * @param <VS> - values source type
+     * @param <T> - parser context
+     */
+    public static <VS extends ValuesSource, T> void declareField(
+        String fieldName,
         AbstractObjectParser<? extends MultiValuesSourceAggregationBuilder<?>, T> objectParser,
-        boolean scriptable, boolean timezoneAware, boolean filterable) {
+        boolean scriptable, boolean timezoneAware, boolean filterable, boolean heterogeneous) {
 
         objectParser.declareField((o, fieldConfig) -> o.field(fieldName, fieldConfig.build()),
-            (p, c) -> MultiValuesSourceFieldConfig.parserBuilder(scriptable, timezoneAware, filterable).parse(p, null),
+            (p, c) -> MultiValuesSourceFieldConfig.parserBuilder(scriptable, timezoneAware, filterable, heterogeneous).parse(p, null),
             new ParseField(fieldName), ObjectParser.ValueType.OBJECT);
     }
 }

+ 14 - 3
server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java

@@ -23,13 +23,14 @@ import java.io.IOException;
 import java.time.ZoneId;
 import java.util.Collections;
 
+import static org.elasticsearch.test.InternalAggregationTestCase.randomNumericDocValueFormat;
 import static org.hamcrest.Matchers.equalTo;
 
 public class MultiValuesSourceFieldConfigTests extends AbstractSerializingTestCase<MultiValuesSourceFieldConfig> {
 
     @Override
     protected MultiValuesSourceFieldConfig doParseInstance(XContentParser parser) throws IOException {
-        return MultiValuesSourceFieldConfig.parserBuilder(true, true, true).apply(parser, null).build();
+        return MultiValuesSourceFieldConfig.parserBuilder(true, true, true, true).apply(parser, null).build();
     }
 
     @Override
@@ -38,8 +39,18 @@ public class MultiValuesSourceFieldConfigTests extends AbstractSerializingTestCa
         Object missing = randomBoolean() ? randomAlphaOfLength(10) : null;
         ZoneId timeZone = randomBoolean() ? randomZone() : null;
         QueryBuilder filter = randomBoolean() ? QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)) : null;
-        return new MultiValuesSourceFieldConfig.Builder()
-            .setFieldName(field).setMissing(missing).setScript(null).setTimeZone(timeZone).setFilter(filter).build();
+        String format = randomBoolean() ? randomNumericDocValueFormat().toString() : null;
+        ValueType userValueTypeHint = randomBoolean()
+            ? randomFrom(ValueType.STRING, ValueType.DOUBLE, ValueType.LONG, ValueType.DATE, ValueType.IP, ValueType.BOOLEAN)
+            : null;
+        return new MultiValuesSourceFieldConfig.Builder().setFieldName(field)
+            .setMissing(missing)
+            .setScript(null)
+            .setTimeZone(timeZone)
+            .setFilter(filter)
+            .setFormat(format)
+            .setUserValueTypeHint(userValueTypeHint)
+            .build();
     }
 
     @Override

+ 1 - 1
test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java

@@ -528,7 +528,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
      * @return a random {@link DocValueFormat} that can be used in aggregations which
      * compute numbers.
      */
-    protected static DocValueFormat randomNumericDocValueFormat() {
+    public static DocValueFormat randomNumericDocValueFormat() {
         final List<Supplier<DocValueFormat>> formats = new ArrayList<>(3);
         formats.add(() -> DocValueFormat.RAW);
         formats.add(() -> new DocValueFormat.Decimal(randomFrom("###.##", "###,###.##")));

+ 9 - 1
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java

@@ -35,6 +35,8 @@ import org.elasticsearch.xpack.analytics.boxplot.InternalBoxplot;
 import org.elasticsearch.xpack.analytics.cumulativecardinality.CumulativeCardinalityPipelineAggregationBuilder;
 import org.elasticsearch.xpack.analytics.mapper.HistogramFieldMapper;
 import org.elasticsearch.xpack.analytics.movingPercentiles.MovingPercentilesPipelineAggregationBuilder;
+import org.elasticsearch.xpack.analytics.multiterms.InternalMultiTerms;
+import org.elasticsearch.xpack.analytics.multiterms.MultiTermsAggregationBuilder;
 import org.elasticsearch.xpack.analytics.normalize.NormalizePipelineAggregationBuilder;
 import org.elasticsearch.xpack.analytics.rate.InternalRate;
 import org.elasticsearch.xpack.analytics.rate.RateAggregationBuilder;
@@ -121,7 +123,13 @@ public class AnalyticsPlugin extends Plugin implements SearchPlugin, ActionPlugi
                 RateAggregationBuilder::new,
                 usage.track(AnalyticsStatsAction.Item.RATE, RateAggregationBuilder.PARSER))
                 .addResultReader(InternalRate::new)
-                .setAggregatorRegistrar(RateAggregationBuilder::registerAggregators)
+                .setAggregatorRegistrar(RateAggregationBuilder::registerAggregators),
+            new AggregationSpec(
+                MultiTermsAggregationBuilder.NAME,
+                MultiTermsAggregationBuilder::new,
+                usage.track(AnalyticsStatsAction.Item.MULTI_TERMS, MultiTermsAggregationBuilder.PARSER))
+                .addResultReader(InternalMultiTerms::new)
+                .setAggregatorRegistrar(MultiTermsAggregationBuilder::registerAggregators)
         );
     }
 

+ 616 - 0
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java

@@ -0,0 +1,616 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.analytics.multiterms;
+
+import static org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.BucketOrder;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.InternalOrder;
+import org.elasticsearch.search.aggregations.KeyComparable;
+import org.elasticsearch.search.aggregations.bucket.terms.AbstractInternalTerms;
+
+public class InternalMultiTerms extends AbstractInternalTerms<InternalMultiTerms, InternalMultiTerms.Bucket> {
+
+    public static TermsComparator TERMS_COMPARATOR = new TermsComparator();
+
+    public static class Bucket extends AbstractInternalTerms.AbstractTermsBucket implements KeyComparable<Bucket> {
+
+        long bucketOrd;
+
+        protected long docCount;
+        protected InternalAggregations aggregations;
+        protected final boolean showDocCountError;
+        protected long docCountError;
+        protected final List<DocValueFormat> formats;
+        protected List<Object> terms;
+        protected List<KeyConverter> keyConverters;
+
+        public Bucket(
+            List<Object> terms,
+            long docCount,
+            InternalAggregations aggregations,
+            boolean showDocCountError,
+            long docCountError,
+            List<DocValueFormat> formats,
+            List<KeyConverter> keyConverters
+        ) {
+            this.terms = terms;
+            this.docCount = docCount;
+            this.aggregations = aggregations;
+            this.showDocCountError = showDocCountError;
+            this.docCountError = docCountError;
+            this.formats = formats;
+            this.keyConverters = keyConverters;
+        }
+
+        protected Bucket(StreamInput in, List<DocValueFormat> formats, List<KeyConverter> keyConverters, boolean showDocCountError)
+            throws IOException {
+            terms = in.readList(StreamInput::readGenericValue);
+            docCount = in.readVLong();
+            aggregations = InternalAggregations.readFrom(in);
+            this.showDocCountError = showDocCountError;
+            docCountError = -1;
+            if (showDocCountError) {
+                docCountError = in.readLong();
+            }
+            this.formats = formats;
+            this.keyConverters = keyConverters;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeCollection(terms, StreamOutput::writeGenericValue);
+            out.writeVLong(docCount);
+            aggregations.writeTo(out);
+            if (showDocCountError) {
+                out.writeLong(docCountError);
+            }
+        }
+
+        @Override
+        public List<Object> getKey() {
+            List<Object> keys = new ArrayList<>(terms.size());
+            for (int i = 0; i < terms.size(); i++) {
+                keys.add(keyConverters.get(i).convert(formats.get(i), terms.get(i)));
+            }
+            return keys;
+        }
+
+        @Override
+        public String getKeyAsString() {
+            StringBuilder keys = new StringBuilder();
+            for (int i = 0; i < terms.size(); i++) {
+                if (i != 0) {
+                    keys.append('|');
+                }
+                keys.append(keyConverters.get(i).convert(formats.get(i), terms.get(i)).toString());
+            }
+            return keys.toString();
+        }
+
+        @Override
+        public long getDocCount() {
+            return docCount;
+        }
+
+        @Override
+        public Aggregations getAggregations() {
+            return aggregations;
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.field(CommonFields.KEY.getPreferredName(), getKey());
+            builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+            builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+            if (getShowDocCountError()) {
+                builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
+            }
+            aggregations.toXContentInternal(builder, params);
+            builder.endObject();
+            return builder;
+        }
+
+        @Override
+        public long getDocCountError() {
+            if (!showDocCountError) {
+                throw new IllegalStateException("show_terms_doc_count_error is false");
+            }
+            return docCountError;
+        }
+
+        @Override
+        protected void setDocCountError(long docCountError) {
+            this.docCountError = docCountError;
+        }
+
+        @Override
+        protected void updateDocCountError(long docCountErrorDiff) {
+            this.docCountError += docCountErrorDiff;
+        }
+
+        @Override
+        protected boolean getShowDocCountError() {
+            return showDocCountError;
+        }
+
+        @Override
+        public int compareKey(Bucket other) {
+            return TERMS_COMPARATOR.compare(terms, other.terms);
+        }
+    }
+
+    /**
+     * Compares buckets with compound keys
+     */
+    static class TermsComparator implements Comparator<List<Object>> {
+        @SuppressWarnings({ "unchecked", "rawtypes" })
+        @Override
+        public int compare(List<Object> thisTerms, List<Object> otherTerms) {
+            if (thisTerms.size() != otherTerms.size()) {
+                throw new AggregationExecutionException(
+                    "Merging/Reducing the multi_term aggregations failed due to different term list" + " sizes"
+                );
+            }
+            for (int i = 0; i < thisTerms.size(); i++) {
+                final int res;
+                try {
+                    res = ((Comparable) thisTerms.get(i)).compareTo(otherTerms.get(i));
+                } catch (ClassCastException ex) {
+                    throw new AggregationExecutionException(
+                        "Merging/Reducing the multi_term aggregations failed when computing "
+                            + "the aggregation because one of the field you gave in the aggregation query existed as two different "
+                            + "types in two different indices"
+                    );
+                }
+                if (res != 0) {
+                    return res;
+                }
+            }
+            return 0;
+        }
+    }
+
+    /**
+     * Specifies how the key from the internal representation should be converted to user representation
+     */
+    public enum KeyConverter {
+        UNSIGNED_LONG {
+            @Override
+            public Object convert(DocValueFormat format, Object obj) {
+                return format.format((Long) obj).toString();
+            }
+
+            @Override
+            public double toDouble(DocValueFormat format, Object obj) {
+                return ((Number) format.format((Long) obj)).doubleValue();
+            }
+        },
+
+        LONG {
+            @Override
+            public Object convert(DocValueFormat format, Object obj) {
+                return format.format((Long) obj);
+            }
+
+            @Override
+            public double toDouble(DocValueFormat format, Object obj) {
+                return ((Long) obj).doubleValue();
+            }
+        },
+
+        DOUBLE {
+            @Override
+            public Object convert(DocValueFormat format, Object obj) {
+                return format.format((Double) obj);
+            }
+
+            @Override
+            public double toDouble(DocValueFormat format, Object obj) {
+                return (Double) obj;
+            }
+        },
+
+        STRING {
+            @Override
+            public Object convert(DocValueFormat format, Object obj) {
+                return format.format((BytesRef) obj);
+            }
+        },
+
+        IP {
+            @Override
+            public Object convert(DocValueFormat format, Object obj) {
+                return format.format((BytesRef) obj);
+            }
+        };
+
+        public Object convert(DocValueFormat format, Object obj) {
+            throw new UnsupportedOperationException();
+        }
+
+        public double toDouble(DocValueFormat format, Object obj) {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    protected final BucketOrder reduceOrder;
+    protected final BucketOrder order;
+    protected final int requiredSize;
+    protected final long minDocCount;
+    protected final List<DocValueFormat> formats;
+    protected final List<KeyConverter> keyConverters;
+    protected final int shardSize;
+    protected final boolean showTermDocCountError;
+    protected final long otherDocCount;
+    protected final List<Bucket> buckets;
+    protected long docCountError;
+
+    public InternalMultiTerms(
+        String name,
+        BucketOrder reduceOrder,
+        BucketOrder order,
+        int requiredSize,
+        long minDocCount,
+        int shardSize,
+        boolean showTermDocCountError,
+        long otherDocCount,
+        List<Bucket> buckets,
+        long docCountError,
+        List<DocValueFormat> formats,
+        List<KeyConverter> keyConverters,
+        Map<String, Object> metadata
+    ) {
+        super(name, metadata);
+        this.reduceOrder = reduceOrder;
+        this.order = order;
+        this.requiredSize = requiredSize;
+        this.minDocCount = minDocCount;
+        this.shardSize = shardSize;
+        this.showTermDocCountError = showTermDocCountError;
+        this.otherDocCount = otherDocCount;
+        this.buckets = buckets;
+        this.docCountError = docCountError;
+        this.formats = formats;
+        this.keyConverters = keyConverters;
+    }
+
+    public InternalMultiTerms(StreamInput in) throws IOException {
+        super(in);
+        reduceOrder = InternalOrder.Streams.readOrder(in);
+        order = InternalOrder.Streams.readOrder(in);
+        requiredSize = readSize(in);
+        minDocCount = in.readVLong();
+        docCountError = in.readZLong();
+        shardSize = readSize(in);
+        showTermDocCountError = in.readBoolean();
+        otherDocCount = in.readVLong();
+        formats = in.readList(in1 -> in1.readNamedWriteable(DocValueFormat.class));
+        keyConverters = in.readList(in1 -> in1.readEnum(KeyConverter.class));
+        buckets = in.readList(stream -> new Bucket(stream, formats, keyConverters, showTermDocCountError));
+    }
+
+    @Override
+    protected void doWriteTo(StreamOutput out) throws IOException {
+        reduceOrder.writeTo(out);
+        order.writeTo(out);
+        writeSize(requiredSize, out);
+        out.writeVLong(minDocCount);
+        out.writeZLong(docCountError);
+        writeSize(shardSize, out);
+        out.writeBoolean(showTermDocCountError);
+        out.writeVLong(otherDocCount);
+        out.writeCollection(formats, StreamOutput::writeNamedWriteable);
+        out.writeCollection(keyConverters, StreamOutput::writeEnum);
+        out.writeList(buckets);
+    }
+
+    @Override
+    protected Bucket[] createBucketsArray(int size) {
+        return new Bucket[size];
+    }
+
+    @Override
+    protected InternalMultiTerms create(
+        String name,
+        List<Bucket> buckets,
+        BucketOrder reduceOrder,
+        long docCountError,
+        long otherDocCount
+    ) {
+        return new InternalMultiTerms(
+            name,
+            reduceOrder,
+            order,
+            requiredSize,
+            minDocCount,
+            shardSize,
+            showTermDocCountError,
+            otherDocCount,
+            buckets,
+            docCountError,
+            formats,
+            keyConverters,
+            getMetadata()
+        );
+    }
+
+    @Override
+    protected int getShardSize() {
+        return shardSize;
+    }
+
+    @Override
+    protected BucketOrder getReduceOrder() {
+        return reduceOrder;
+    }
+
+    @Override
+    protected BucketOrder getOrder() {
+        return order;
+    }
+
+    @Override
+    protected long getSumOfOtherDocCounts() {
+        return otherDocCount;
+    }
+
+    @Override
+    protected long getDocCountError() {
+        return docCountError;
+    }
+
+    @Override
+    protected void setDocCountError(long docCountError) {
+        this.docCountError = docCountError;
+    }
+
+    @Override
+    protected long getMinDocCount() {
+        return minDocCount;
+    }
+
+    @Override
+    protected int getRequiredSize() {
+        return requiredSize;
+    }
+
+    @Override
+    protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, Bucket prototype) {
+        return new Bucket(prototype.terms, docCount, aggs, prototype.showDocCountError, docCountError, formats, keyConverters);
+    }
+
+    @Override
+    public InternalMultiTerms create(List<Bucket> buckets) {
+        return new InternalMultiTerms(
+            name,
+            reduceOrder,
+            order,
+            requiredSize,
+            minDocCount,
+            shardSize,
+            showTermDocCountError,
+            otherDocCount,
+            buckets,
+            docCountError,
+            formats,
+            keyConverters,
+            getMetadata()
+        );
+    }
+
+    /**
+     * Checks if any keys need to be promoted to double from long or unsigned_long.
+     * Returns one flag per key position, or {@code null} if no promotion is needed
+     * (including the single-aggregation case, where no type mixing is possible).
+     */
+    private boolean[] needsPromotionToDouble(List<InternalAggregation> aggregations) {
+        if (aggregations.size() < 2) {
+            return null;
+        }
+        boolean[] promotions = null;
+
+        for (int i = 0; i < keyConverters.size(); i++) {
+            boolean hasLong = false;
+            boolean hasUnsignedLong = false;
+            boolean hasDouble = false;
+            boolean hasNonNumber = false;
+            for (InternalAggregation aggregation : aggregations) {
+                InternalMultiTerms agg = (InternalMultiTerms) aggregation;
+                KeyConverter keyConverter = agg.keyConverters.get(i);
+                switch (keyConverter) {
+                    case DOUBLE:
+                        hasDouble = true;
+                        break;
+                    case LONG:
+                        hasLong = true;
+                        break;
+                    case UNSIGNED_LONG:
+                        hasUnsignedLong = true;
+                        break;
+                    default:
+                        hasNonNumber = true;
+                        break;
+                }
+            }
+            if (hasNonNumber && (hasDouble || hasUnsignedLong || hasLong)) {
+                throw new AggregationExecutionException(
+                    "Merging/Reducing the multi_term aggregations failed when computing the aggregation "
+                        + name
+                        + " because the field in the position "
+                        + (i + 1)
+                        + " in the aggregation has two different types in two "
+                        + " different indices"
+                );
+            }
+            // Promotion to double is required if at least 2 of these 3 conditions are true.
+            if ((hasDouble ? 1 : 0) + (hasUnsignedLong ? 1 : 0) + (hasLong ? 1 : 0) > 1) {
+                if (promotions == null) {
+                    promotions = new boolean[keyConverters.size()];
+                }
+                promotions[i] = true;
+            }
+        }
+        return promotions;
+    }
+
+    private InternalAggregation promoteToDouble(InternalAggregation aggregation, boolean[] needsPromotion) {
+        InternalMultiTerms multiTerms = (InternalMultiTerms) aggregation;
+        List<Bucket> buckets = multiTerms.getBuckets();
+        List<List<Object>> newKeys = new ArrayList<>();
+        for (InternalMultiTerms.Bucket bucket : buckets) {
+            newKeys.add(new ArrayList<>(bucket.terms.size()));
+        }
+
+        List<KeyConverter> newKeyConverters = new ArrayList<>(multiTerms.keyConverters.size());
+        for (int i = 0; i < needsPromotion.length; i++) {
+            KeyConverter converter = multiTerms.keyConverters.get(i);
+            DocValueFormat format = formats.get(i);
+            if (needsPromotion[i]) {
+                newKeyConverters.add(KeyConverter.DOUBLE);
+                for (int j = 0; j < buckets.size(); j++) {
+                    newKeys.get(j).add(converter.toDouble(format, buckets.get(j).terms.get(i)));
+                }
+            } else {
+                newKeyConverters.add(converter);
+                for (int j = 0; j < buckets.size(); j++) {
+                    newKeys.get(j).add(buckets.get(j).terms.get(i));
+                }
+            }
+        }
+
+        List<Bucket> newBuckets = new ArrayList<>(buckets.size());
+        for (int i = 0; i < buckets.size(); i++) {
+            Bucket oldBucket = buckets.get(i);
+            newBuckets.add(
+                new Bucket(
+                    newKeys.get(i),
+                    oldBucket.docCount,
+                    oldBucket.aggregations,
+                    oldBucket.showDocCountError,
+                    oldBucket.docCountError,
+                    formats,
+                    newKeyConverters
+                )
+            );
+        }
+
+        // During promotion we might have changed the keys by promoting longs to doubles and losing precision,
+        // which might have caused some keys to now be in the wrong order, so we need to re-sort.
+        newBuckets.sort(reduceOrder.comparator());
+
+        return new InternalMultiTerms(
+            multiTerms.name,
+            multiTerms.reduceOrder,
+            multiTerms.order,
+            multiTerms.requiredSize,
+            multiTerms.minDocCount,
+            multiTerms.shardSize,
+            multiTerms.showTermDocCountError,
+            multiTerms.otherDocCount,
+            newBuckets,
+            multiTerms.docCountError,
+            multiTerms.formats,
+            newKeyConverters,
+            multiTerms.metadata
+        );
+    }
+
+    public InternalAggregation reduce(
+        List<InternalAggregation> aggregations,
+        ReduceContext reduceContext,
+        boolean[] needsPromotionToDouble
+    ) {
+        if (needsPromotionToDouble != null) {
+            List<InternalAggregation> newAggs = new ArrayList<>(aggregations.size());
+            for (InternalAggregation agg : aggregations) {
+                newAggs.add(promoteToDouble(agg, needsPromotionToDouble));
+            }
+            return ((InternalMultiTerms) newAggs.get(0)).reduce(newAggs, reduceContext, null);
+        } else {
+            return super.reduce(aggregations, reduceContext);
+        }
+    }
+
+    @Override
+    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+        return reduce(aggregations, reduceContext, needsPromotionToDouble(aggregations));
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+        return new Bucket(prototype.terms, prototype.docCount, aggregations, showTermDocCountError, docCountError, formats, keyConverters);
+    }
+
+    @Override
+    public List<Bucket> getBuckets() {
+        return buckets;
+    }
+
+    @Override
+    public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+        return doXContentCommon(builder, params, docCountError, otherDocCount, buckets);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return MultiTermsAggregationBuilder.NAME;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        if (!super.equals(o)) return false;
+        InternalMultiTerms that = (InternalMultiTerms) o;
+        return requiredSize == that.requiredSize
+            && minDocCount == that.minDocCount
+            && shardSize == that.shardSize
+            && showTermDocCountError == that.showTermDocCountError
+            && otherDocCount == that.otherDocCount
+            && docCountError == that.docCountError
+            && Objects.equals(reduceOrder, that.reduceOrder)
+            && Objects.equals(order, that.order)
+            && Objects.equals(formats, that.formats)
+            && Objects.equals(keyConverters, that.keyConverters)
+            && Objects.equals(buckets, that.buckets);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+            super.hashCode(),
+            reduceOrder,
+            order,
+            requiredSize,
+            minDocCount,
+            formats,
+            keyConverters,
+            shardSize,
+            showTermDocCountError,
+            otherDocCount,
+            buckets,
+            docCountError
+        );
+    }
+}

+ 430 - 0
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java

@@ -0,0 +1,430 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.analytics.multiterms;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ContextParser;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.BucketOrder;
+import org.elasticsearch.search.aggregations.InternalOrder;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
+import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
+
+public class MultiTermsAggregationBuilder extends AbstractAggregationBuilder<MultiTermsAggregationBuilder> {
+    public static final String NAME = "multi_terms";
+    public static final ParseField TERMS_FIELD = new ParseField("terms");
+    public static final ParseField ORDER_FIELD = new ParseField("order");
+    public static final ParseField SHARD_SIZE_FIELD_NAME = new ParseField("shard_size");
+    public static final ParseField MIN_DOC_COUNT_FIELD_NAME = new ParseField("min_doc_count");
+    public static final ParseField SHARD_MIN_DOC_COUNT_FIELD_NAME = new ParseField("shard_min_doc_count");
+    public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size");
+    public static final ParseField SHOW_TERM_DOC_COUNT_ERROR = new ParseField("show_term_doc_count_error");
+
+    static final TermsAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS = new TermsAggregator.BucketCountThresholds(
+        1,
+        0,
+        10,
+        -1
+    );
+
+    public static final ObjectParser<MultiTermsAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
+        NAME,
+        MultiTermsAggregationBuilder::new
+    );
+
+    static {
+        ContextParser<Void, MultiValuesSourceFieldConfig.Builder> termsParser = MultiValuesSourceFieldConfig.parserBuilder(
+            true,
+            true,
+            false,
+            true
+        );
+
+        PARSER.declareBoolean(MultiTermsAggregationBuilder::showTermDocCountError, MultiTermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR);
+
+        PARSER.declareObjectArray(MultiTermsAggregationBuilder::terms, (p, n) -> termsParser.parse(p, null).build(), TERMS_FIELD);
+
+        PARSER.declareInt(MultiTermsAggregationBuilder::shardSize, SHARD_SIZE_FIELD_NAME);
+
+        PARSER.declareLong(MultiTermsAggregationBuilder::minDocCount, MIN_DOC_COUNT_FIELD_NAME);
+
+        PARSER.declareLong(MultiTermsAggregationBuilder::shardMinDocCount, SHARD_MIN_DOC_COUNT_FIELD_NAME);
+
+        PARSER.declareInt(MultiTermsAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME);
+
+        PARSER.declareObjectArray(MultiTermsAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p), ORDER_FIELD);
+
+        PARSER.declareField(
+            MultiTermsAggregationBuilder::collectMode,
+            (p, c) -> Aggregator.SubAggCollectionMode.parse(p.text(), LoggingDeprecationHandler.INSTANCE),
+            Aggregator.SubAggCollectionMode.KEY,
+            ObjectParser.ValueType.STRING
+        );
+    }
+
+    private List<MultiValuesSourceFieldConfig> terms = Collections.emptyList();
+
+    private BucketOrder order = BucketOrder.compound(BucketOrder.count(false));
+
+    boolean showTermDocCountError = false;
+
+    private Aggregator.SubAggCollectionMode collectMode = null;
+
+    private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(
+        DEFAULT_BUCKET_COUNT_THRESHOLDS
+    );
+
+    @FunctionalInterface
+    interface MultiTermValuesSupplier {
+        MultiTermsAggregator.TermValuesSource build(ValuesSourceConfig config);
+    }
+
+    static final ValuesSourceRegistry.RegistryKey<MultiTermValuesSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
+        NAME,
+        MultiTermValuesSupplier.class
+    );
+
+    public static void registerAggregators(ValuesSourceRegistry.Builder registry) {
+        registry.registerUsage(NAME);
+        registry.register(REGISTRY_KEY, List.of(CoreValuesSourceType.NUMERIC), MultiTermsAggregator::buildNumericTermValues, false);
+        registry.register(
+            REGISTRY_KEY,
+            List.of(CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.DATE),
+            MultiTermsAggregator.LongTermValuesSource::new,
+            false
+        );
+        registry.register(REGISTRY_KEY, List.of(CoreValuesSourceType.KEYWORD), MultiTermsAggregator.StringTermValuesSource::new, false);
+        registry.register(REGISTRY_KEY, List.of(CoreValuesSourceType.IP), MultiTermsAggregator.IPTermValuesSource::new, false);
+    }
+
+    public MultiTermsAggregationBuilder(String name) {
+        super(name);
+    }
+
+    public MultiTermsAggregationBuilder(
+        MultiTermsAggregationBuilder clone,
+        AggregatorFactories.Builder factoriesBuilder,
+        Map<String, Object> metadata
+    ) {
+        super(clone, factoriesBuilder, metadata);
+        this.terms = new ArrayList<>(clone.terms);
+        this.order = clone.order;
+        this.collectMode = clone.collectMode;
+        this.showTermDocCountError = clone.showTermDocCountError;
+        this.bucketCountThresholds = new TermsAggregator.BucketCountThresholds(clone.bucketCountThresholds);
+    }
+
+    public MultiTermsAggregationBuilder(StreamInput in) throws IOException {
+        super(in);
+        terms = in.readList(MultiValuesSourceFieldConfig::new);
+        order = InternalOrder.Streams.readOrder(in);
+        collectMode = in.readOptionalWriteable(Aggregator.SubAggCollectionMode::readFromStream);
+        bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in);
+        showTermDocCountError = in.readBoolean();
+    }
+
+    /**
+     * Sets the field to use for this aggregation.
+     */
+    public MultiTermsAggregationBuilder terms(List<MultiValuesSourceFieldConfig> terms) {
+        if (terms == null) {
+            throw new IllegalArgumentException("[terms] must not be null: [" + name + "]");
+        }
+        if (terms.size() < 2) {
+            throw new IllegalArgumentException(
+                "The [terms] parameter in the aggregation ["
+                    + name
+                    + "] must be present and have at least "
+                    + "2 fields or scripts."
+                    + (terms.size() == 1 ? " For a single field user terms aggregation." : "")
+            );
+        }
+        this.terms = terms;
+        return this;
+    }
+
+    /**
+     * Gets the field to use for this aggregation.
+     */
+    public List<MultiValuesSourceFieldConfig> terms() {
+        return terms;
+    }
+
+    @Override
+    protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
+        return new MultiTermsAggregationBuilder(this, factoriesBuilder, metadata);
+    }
+
+    @Override
+    public BucketCardinality bucketCardinality() {
+        return BucketCardinality.MANY;
+    }
+
+    @Override
+    protected final void doWriteTo(StreamOutput out) throws IOException {
+        out.writeList(terms);
+        order.writeTo(out);
+        out.writeOptionalWriteable(collectMode);
+        bucketCountThresholds.writeTo(out);
+        out.writeBoolean(showTermDocCountError);
+    }
+
+    /**
+     * Get whether doc count error will be return for individual terms
+     */
+    public boolean showTermDocCountError() {
+        return showTermDocCountError;
+    }
+
+    /**
+     * Set whether doc count error will be return for individual terms
+     */
+    public MultiTermsAggregationBuilder showTermDocCountError(boolean showTermDocCountError) {
+        this.showTermDocCountError = showTermDocCountError;
+        return this;
+    }
+
+    /**
+     * Sets the size - indicating how many term buckets should be returned
+     * (defaults to 10)
+     */
+    public MultiTermsAggregationBuilder size(int size) {
+        if (size <= 0) {
+            throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]");
+        }
+        bucketCountThresholds.setRequiredSize(size);
+        return this;
+    }
+
+    /**
+     * Returns the number of term buckets currently configured
+     */
+    public int size() {
+        return bucketCountThresholds.getRequiredSize();
+    }
+
+    /**
+     * Sets the shard_size - indicating the number of term buckets each shard
+     * will return to the coordinating node (the node that coordinates the
+     * search execution). The higher the shard size is, the more accurate the
+     * results are.
+     */
+    public MultiTermsAggregationBuilder shardSize(int shardSize) {
+        if (shardSize <= 0) {
+            throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
+        }
+        bucketCountThresholds.setShardSize(shardSize);
+        return this;
+    }
+
+    /**
+     * Returns the number of term buckets per shard that are currently configured
+     */
+    public int shardSize() {
+        return bucketCountThresholds.getShardSize();
+    }
+
+    /**
+     * Set the minimum document count terms should have in order to appear in
+     * the response.
+     */
+    public MultiTermsAggregationBuilder minDocCount(long minDocCount) {
+        if (minDocCount < 1) {
+            throw new IllegalArgumentException(
+                "[minDocCount] must be greater than or equal to 1. Found [" + minDocCount + "] in [" + name + "]"
+            );
+        }
+        bucketCountThresholds.setMinDocCount(minDocCount);
+        return this;
+    }
+
+    /**
+     * Returns the minimum document count required per term
+     */
+    public long minDocCount() {
+        return bucketCountThresholds.getMinDocCount();
+    }
+
+    /**
+     * Set the minimum document count terms should have on the shard in order to
+     * appear in the response.
+     */
+    public MultiTermsAggregationBuilder shardMinDocCount(long shardMinDocCount) {
+        if (shardMinDocCount < 0) {
+            throw new IllegalArgumentException(
+                "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]"
+            );
+        }
+        bucketCountThresholds.setShardMinDocCount(shardMinDocCount);
+        return this;
+    }
+
+    /**
+     * Returns the minimum document count required per term, per shard
+     */
+    public long shardMinDocCount() {
+        return bucketCountThresholds.getShardMinDocCount();
+    }
+
+    /**
+     * Set a new order on this builder and return the builder so that calls
+     * can be chained. A tie-breaker may be added to avoid non-deterministic ordering.
+     */
+    public MultiTermsAggregationBuilder order(BucketOrder order) {
+        if (order == null) {
+            throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
+        }
+        if (order instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(order)) {
+            this.order = order; // if order already contains a tie-breaker we are good to go
+        } else { // otherwise add a tie-breaker by using a compound order
+            this.order = BucketOrder.compound(order);
+        }
+        return this;
+    }
+
+    /**
+     * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic
+     * ordering.
+     */
+    public MultiTermsAggregationBuilder order(List<BucketOrder> orders) {
+        if (orders == null) {
+            throw new IllegalArgumentException("[orders] must not be null: [" + name + "]");
+        }
+        // if the list only contains one order use that to avoid inconsistent xcontent
+        order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0));
+        return this;
+    }
+
+    /**
+     * Gets the order in which the buckets will be returned.
+     */
+    public BucketOrder order() {
+        return order;
+    }
+
+    /**
+     * Expert: set the collection mode.
+     */
+    public MultiTermsAggregationBuilder collectMode(Aggregator.SubAggCollectionMode collectMode) {
+        if (collectMode == null) {
+            throw new IllegalArgumentException("[collectMode] must not be null: [" + name + "]");
+        }
+        this.collectMode = collectMode;
+        return this;
+    }
+
+    /**
+     * Expert: get the collection mode.
+     */
+    public Aggregator.SubAggCollectionMode collectMode() {
+        return collectMode;
+    }
+
+    @Override
+    protected final MultiTermsAggregationFactory doBuild(
+        AggregationContext context,
+        AggregatorFactory parent,
+        AggregatorFactories.Builder subFactoriesBuilder
+    ) throws IOException {
+        List<ValuesSourceConfig> configs = resolveConfig(context);
+        return new MultiTermsAggregationFactory(
+            name,
+            configs,
+            configs.stream().map(ValuesSourceConfig::format).collect(Collectors.toList()),
+            showTermDocCountError,
+            order,
+            collectMode,
+            bucketCountThresholds,
+            context,
+            parent,
+            subFactoriesBuilder,
+            metadata
+        );
+    }
+
+    protected List<ValuesSourceConfig> resolveConfig(AggregationContext context) {
+        List<ValuesSourceConfig> configs = new ArrayList<>();
+        for (MultiValuesSourceFieldConfig field : terms) {
+            configs.add(
+                ValuesSourceConfig.resolveUnregistered(
+                    context,
+                    field.getUserValueTypeHint(),
+                    field.getFieldName(),
+                    field.getScript(),
+                    field.getMissing(),
+                    field.getTimeZone(),
+                    field.getFormat(),
+                    CoreValuesSourceType.KEYWORD
+                )
+            );
+
+        }
+        return configs;
+    }
+
+    @Override
+    public final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        bucketCountThresholds.toXContent(builder, params);
+        builder.field(SHOW_TERM_DOC_COUNT_ERROR.getPreferredName(), showTermDocCountError);
+        builder.field(ORDER_FIELD.getPreferredName());
+        order.toXContent(builder, params);
+        if (collectMode != null) {
+            builder.field(Aggregator.SubAggCollectionMode.KEY.getPreferredName(), collectMode.parseField().getPreferredName());
+        }
+        if (terms != null) {
+            builder.field(TERMS_FIELD.getPreferredName(), terms);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public String getType() {
+        return NAME;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), terms, order, collectMode, bucketCountThresholds);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null || getClass() != obj.getClass()) return false;
+        if (super.equals(obj) == false) return false;
+
+        MultiTermsAggregationBuilder other = (MultiTermsAggregationBuilder) obj;
+        return Objects.equals(this.terms, other.terms)
+            && Objects.equals(this.order, other.order)
+            && Objects.equals(this.collectMode, other.collectMode)
+            && Objects.equals(this.bucketCountThresholds, other.bucketCountThresholds);
+    }
+}

+ 70 - 0
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationFactory.java

@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.analytics.multiterms;
+
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.BucketOrder;
+import org.elasticsearch.search.aggregations.CardinalityUpperBound;
+import org.elasticsearch.search.aggregations.InternalOrder;
+import org.elasticsearch.search.aggregations.bucket.BucketUtils;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+public class MultiTermsAggregationFactory extends AggregatorFactory {
+
+    protected final List<ValuesSourceConfig> configs;
+    protected final List<DocValueFormat> formats;
+    private final boolean showTermDocCountError;
+    private final BucketOrder order;
+    private final Aggregator.SubAggCollectionMode collectMode;
+    private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+
+    public MultiTermsAggregationFactory(String name, List<ValuesSourceConfig> configs,
+                                        List<DocValueFormat> formats,
+                                        boolean showTermDocCountError,
+                                        BucketOrder order,
+                                        Aggregator.SubAggCollectionMode collectMode,
+                                        TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                                        AggregationContext context,
+                                        AggregatorFactory parent,
+                                        AggregatorFactories.Builder subFactoriesBuilder,
+                                        Map<String, Object> metadata) throws IOException {
+        super(name, context, parent, subFactoriesBuilder, metadata);
+        this.configs = configs;
+        this.formats = formats;
+        this.showTermDocCountError = showTermDocCountError;
+        this.order = order;
+        this.collectMode = collectMode;
+        this.bucketCountThresholds = bucketCountThresholds;
+    }
+
+    @Override
+    protected Aggregator createInternal(Aggregator parent,
+                                        CardinalityUpperBound cardinality,
+                                        Map<String, Object> metadata) throws IOException {
+        TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds);
+        if (InternalOrder.isKeyOrder(order) == false
+            && bucketCountThresholds.getShardSize() == MultiTermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+            // The user has not made a shardSize selection. Use default
+            // heuristic to avoid any wrong-ranking caused by distributed
+            // counting
+            bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+        }
+        bucketCountThresholds.ensureValidity();
+        return new MultiTermsAggregator(name, factories, context, parent, configs, formats, showTermDocCountError, order,
+            collectMode, bucketCountThresholds, cardinality, metadata);
+    }
+}

+ 495 - 0
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java

@@ -0,0 +1,495 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.analytics.multiterms;
+
+import static java.util.Collections.emptyList;
+import static org.elasticsearch.search.aggregations.InternalOrder.isKeyOrder;
+import static org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.aggsUsedForSorting;
+import static org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.descendsFromNestedAggregator;
+import static org.elasticsearch.xpack.analytics.multiterms.MultiTermsAggregationBuilder.REGISTRY_KEY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
+import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.BucketOrder;
+import org.elasticsearch.search.aggregations.CardinalityUpperBound;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalOrder;
+import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
+import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator;
+import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue;
+import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.aggregations.support.ValuesSource;
+import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
+
+/**
+ * Collects the {@code multi_terms} aggregation, which functions like the
+ * {@code terms} aggregation, but supports multiple fields that are treated
+ * as a tuple.
+ */
+class MultiTermsAggregator extends DeferableBucketAggregator {
+
    // One DocValueFormat per term source, in source order.
    protected final List<DocValueFormat> formats;
    protected final TermsAggregator.BucketCountThresholds bucketCountThresholds;
    protected final BucketOrder order;
    // Shard-level comparator derived from "order"; null when no order was supplied.
    protected final Comparator<InternalMultiTerms.Bucket> partiallyBuiltBucketComparator;
    // Sub-aggregations referenced by the order - these must not be deferred.
    protected final Set<Aggregator> aggsUsedForSorting;
    protected final SubAggCollectionMode collectMode;
    // One resolved TermValuesSource per configured term.
    private final List<TermValuesSource> values;
    private final boolean showTermDocCountError;
    // True if any value source requires document scores (see scoreMode()).
    private final boolean needsScore;
    private final List<InternalMultiTerms.KeyConverter> keyConverters;

    // Maps packed term tuples (BytesRef) to dense bucket ordinals, per owning bucket.
    private final BytesKeyedBucketOrds bucketOrds;
+
    // Wires together the configured value sources, bucket ordering and bucket
    // ordinal storage for one shard-level execution of the aggregation.
    protected MultiTermsAggregator(
        String name,
        AggregatorFactories factories,
        AggregationContext context,
        Aggregator parent,
        List<ValuesSourceConfig> configs,
        List<DocValueFormat> formats,
        boolean showTermDocCountError,
        BucketOrder order,
        SubAggCollectionMode collectMode,
        TermsAggregator.BucketCountThresholds bucketCountThresholds,
        CardinalityUpperBound cardinality,
        Map<String, Object> metadata
    ) throws IOException {
        super(name, factories, context, parent, metadata);
        this.bucketCountThresholds = bucketCountThresholds;
        this.order = order;
        // No order means no shard-level comparator is needed.
        partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
        this.formats = formats;
        this.showTermDocCountError = showTermDocCountError;
        if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) {
            /**
             * Force the execution to depth_first because we need to access the score of
             * nested documents in a sub-aggregation and we are not able to generate this score
             * while replaying deferred documents.
             */
            this.collectMode = SubAggCollectionMode.DEPTH_FIRST;
        } else {
            this.collectMode = collectMode;
        }
        aggsUsedForSorting = aggsUsedForSorting(this, order);
        // Scores are needed when at least one configured value source requires them.
        this.needsScore = configs.stream().anyMatch(c -> c.getValuesSource().needsScores());
        // Resolve a TermValuesSource per config through the values-source registry.
        values = configs.stream()
            .map(c -> context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, c).build(c))
            .collect(Collectors.toList());
        keyConverters = values.stream().map(TermValuesSource::keyConverter).collect(Collectors.toList());
        bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality);

    }
+
+    private boolean subAggsNeedScore() {
+        for (Aggregator subAgg : subAggregators) {
+            if (subAgg.scoreMode().needsScores()) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    public ScoreMode scoreMode() {
+        if (needsScore) {
+            return ScoreMode.COMPLETE;
+        }
+        return super.scoreMode();
+    }
+
+    @Override
+    protected boolean shouldDefer(Aggregator aggregator) {
+        return collectMode == SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator);
+    }
+
+    List<TermValues> termValuesList(LeafReaderContext ctx) throws IOException {
+        List<TermValues> termValuesList = new ArrayList<>();
+        for (TermValuesSource termValuesSource : values) {
+            termValuesList.add(termValuesSource.getValues(ctx));
+        }
+        return termValuesList;
+    }
+
+    List<List<Object>> docTerms(List<TermValues> termValuesList, int doc) throws IOException {
+        List<List<Object>> terms = new ArrayList<>();
+        for (TermValues termValues : termValuesList) {
+            List<Object> values = termValues.collectValues(doc);
+            if (values == null) {
+                return null;
+            }
+            terms.add(values);
+        }
+        return terms;
+    }
+
    /**
     * Packs a list of terms into a {@link BytesRef} so we can use BytesKeyedBucketOrds.
     *
     * TODO: this is a temporary solution, we should replace it with a more optimal mechanism instead of relying on BytesKeyedBucketOrds
     */
    static BytesRef packKey(List<Object> terms) {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            // writeGenericValue keeps per-term type information so unpackTerms can restore it.
            output.writeCollection(terms, StreamOutput::writeGenericValue);
            return output.bytes().toBytesRef();
        } catch (IOException ex) {
            // In-memory stream: an IOException here is unexpected, surface it unchecked.
            throw ExceptionsHelper.convertToRuntime(ex);
        }
    }
+
    /**
     * Unpacks a {@link BytesRef} produced by {@code packKey} back into a list of terms.
     *
     * TODO: this is a temporary solution, we should replace it with a more optimal mechanism instead of relying on BytesKeyedBucketOrds
     */
    static List<Object> unpackTerms(BytesRef termsBytes) {
        try (StreamInput input = new BytesArray(termsBytes).streamInput()) {
            return input.readList(StreamInput::readGenericValue);
        } catch (IOException ex) {
            // In-memory read: an IOException here is unexpected, surface it unchecked.
            throw ExceptionsHelper.convertToRuntime(ex);
        }
    }
+
    // Collects one bucket for every combination (cartesian product) of the
    // per-source values of each matching document.
    @Override
    public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
        // One TermValues per configured source, bound to this leaf.
        List<TermValues> termValuesList = termValuesList(ctx);

        return new LeafBucketCollectorBase(sub, values) {
            @Override
            public void collect(int doc, long owningBucketOrd) throws IOException {
                // null means at least one source had no value - skip the doc entirely.
                List<List<Object>> terms = docTerms(termValuesList, doc);
                if (terms != null) {
                    // "path" holds one value per source; the anonymous consumer below
                    // recursively walks the cartesian product of the per-source value
                    // lists, emitting one bucket per complete tuple.
                    List<Object> path = new ArrayList<>(terms.size());
                    new CheckedConsumer<Integer, IOException>() {
                        @Override
                        public void accept(Integer start) throws IOException {
                            for (Object term : terms.get(start)) {
                                // Grow the path on first visit at this depth, overwrite afterwards.
                                if (start == path.size()) {
                                    path.add(term);
                                } else {
                                    path.set(start, term);
                                }
                                if (start < terms.size() - 1) {
                                    // Recurse into the next source's values.
                                    this.accept(start + 1);
                                } else {
                                    // Complete tuple: resolve or create its bucket ordinal.
                                    long bucketOrd = bucketOrds.add(owningBucketOrd, packKey(path));
                                    if (bucketOrd < 0) { // already seen
                                        bucketOrd = -1 - bucketOrd;
                                        collectExistingBucket(sub, doc, bucketOrd);
                                    } else {
                                        collectBucket(sub, doc, bucketOrd);
                                    }
                                }
                            }
                        }
                    }.accept(0);
                }
            }
        };
    }
+
    // Selects, per owning bucket ordinal, the top shard_size buckets by the
    // shard-level comparator, then attaches sub-aggregations and builds the
    // shard-level InternalMultiTerms results.
    @Override
    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
        InternalMultiTerms.Bucket[][] topBucketsPerOrd = new InternalMultiTerms.Bucket[owningBucketOrds.length][];
        long[] otherDocCounts = new long[owningBucketOrds.length];
        for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
            long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]);

            // Keep at most shard_size buckets in a priority queue ordered by the comparator.
            int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());
            PriorityQueue<InternalMultiTerms.Bucket> ordered = new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator);
            InternalMultiTerms.Bucket spare = null;
            BytesRef spareKey = null;
            BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]);
            while (ordsEnum.next()) {
                long docCount = bucketDocCount(ordsEnum.ord());
                // Count every doc as "other" first; docs of the surviving top
                // buckets are subtracted back out below.
                otherDocCounts[ordIdx] += docCount;
                if (docCount < bucketCountThresholds.getShardMinDocCount()) {
                    continue;
                }
                if (spare == null) {
                    // Reuse one scratch bucket: insertWithOverflow hands back the
                    // evicted (or rejected) bucket, keeping allocation bounded by shard_size.
                    spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters);
                    spareKey = new BytesRef();
                }
                ordsEnum.readValue(spareKey);
                spare.terms = unpackTerms(spareKey);
                spare.docCount = docCount;
                spare.bucketOrd = ordsEnum.ord();
                spare = ordered.insertWithOverflow(spare);
            }

            // Get the top buckets (pop yields worst-first, so fill the array backwards).
            InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[ordered.size()];
            topBucketsPerOrd[ordIdx] = bucketsForOrd;
            for (int b = ordered.size() - 1; b >= 0; --b) {
                topBucketsPerOrd[ordIdx][b] = ordered.pop();
                otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount();
            }
        }

        // Build sub-aggregations only for the buckets that survived selection.
        buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);

        InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
        for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
            result[ordIdx] = buildResult(otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]);
        }
        return result;
    }
+
+    /**
+     * Wraps one owning ordinal's top buckets into an {@link InternalMultiTerms}.
+     * If the requested order is not already a key order, the buckets are
+     * re-sorted by ascending key and that key order is recorded as the reduce
+     * order, so shard results can be merged with a key-ordered reduce.
+     */
+    InternalMultiTerms buildResult(long otherDocCount, InternalMultiTerms.Bucket[] topBuckets) {
+        final BucketOrder reduceOrder;
+        if (isKeyOrder(order) == false) {
+            reduceOrder = InternalOrder.key(true);
+            Arrays.sort(topBuckets, reduceOrder.comparator());
+        } else {
+            reduceOrder = order;
+        }
+        return new InternalMultiTerms(
+            name,
+            reduceOrder,
+            order,
+            bucketCountThresholds.getRequiredSize(),
+            bucketCountThresholds.getMinDocCount(),
+            bucketCountThresholds.getShardSize(),
+            showTermDocCountError,
+            otherDocCount,
+            List.of(topBuckets),
+            0, // doc count error — presumably not computed at the shard level yet; TODO confirm
+            formats,
+            keyConverters,
+            metadata()
+        );
+    }
+
+    /**
+     * Builds an empty multi_terms result (no buckets, zero counts) for owning
+     * buckets that collected no documents. With no buckets to sort, the reduce
+     * order can simply mirror the requested order.
+     */
+    @Override
+    public InternalAggregation buildEmptyAggregation() {
+        return new InternalMultiTerms(
+            name,
+            order,
+            order,
+            bucketCountThresholds.getRequiredSize(),
+            bucketCountThresholds.getMinDocCount(),
+            bucketCountThresholds.getShardSize(),
+            showTermDocCountError,
+            0,
+            emptyList(),
+            0,
+            formats,
+            keyConverters,
+            metadata()
+        );
+    }
+
+    /**
+     * Builds the numeric {@link TermValuesSource} for the given config,
+     * choosing the floating point or long variant based on the values source.
+     */
+    static TermValuesSource buildNumericTermValues(ValuesSourceConfig config) {
+        final ValuesSource.Numeric vs = (ValuesSource.Numeric) config.getValuesSource();
+        if (vs.isFloatingPoint()) {
+            return new DoubleTermValuesSource(config);
+        } else {
+            return new LongTermValuesSource(config);
+        }
+    }
+
+    /**
+     * Captures the type-specific functionality of each term that comprises the multi_term key
+     */
+    interface TermValuesSource {
+        /**
+         * Used in getLeafCollector to obtain the doc values for the given type
+         */
+        TermValues getValues(LeafReaderContext ctx) throws IOException;
+
+        /**
+         * Returns a key converter that knows how to convert key values into a user-friendly representation and format them as a string
+         */
+        InternalMultiTerms.KeyConverter keyConverter();
+    }
+
+    interface TermValues {
+        /**
+         * Returns a list of values retrieved from doc values for the given document,
+         * or {@code null} if the document has no values for the field
+         */
+        List<Object> collectValues(int doc) throws IOException;
+    }
+
+    /**
+     * Handles non-float and date doc values
+     */
+    static class LongTermValuesSource implements TermValuesSource {
+        ValuesSource.Numeric source;
+        InternalMultiTerms.KeyConverter converter;
+
+        LongTermValuesSource(ValuesSourceConfig config) {
+            this.source = (ValuesSource.Numeric) config.getValuesSource();
+            // Unsigned longs are stored shifted, so they need a dedicated
+            // converter to be rendered in their unsigned form.
+            if (config.format() == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                converter = InternalMultiTerms.KeyConverter.UNSIGNED_LONG;
+            } else {
+                converter = InternalMultiTerms.KeyConverter.LONG;
+            }
+        }
+
+        @Override
+        public TermValues getValues(LeafReaderContext ctx) throws IOException {
+            SortedNumericDocValues values = source.longValues(ctx);
+            return doc -> {
+                if (values.advanceExact(doc)) {
+                    List<Object> objects = new ArrayList<>();
+                    int valuesCount = values.docValueCount();
+                    // Per-doc values are sorted, so duplicates are adjacent; track
+                    // the previous value to skip them. The i == 0 check makes the
+                    // first value always accepted regardless of the sentinel.
+                    long previous = Long.MAX_VALUE;
+                    for (int i = 0; i < valuesCount; ++i) {
+                        long val = values.nextValue();
+                        if (previous != val || i == 0) {
+                            objects.add(val);
+                            previous = val;
+                        }
+                    }
+                    return objects;
+                } else {
+                    // document has no values for this field
+                    return null;
+                }
+            };
+        }
+
+        @Override
+        public InternalMultiTerms.KeyConverter keyConverter() {
+            return converter;
+        }
+    }
+
+    /**
+     * Handles floating point doc values (dates are routed to the long variant
+     * by buildNumericTermValues)
+     */
+    static class DoubleTermValuesSource implements TermValuesSource {
+        ValuesSource.Numeric source;
+
+        DoubleTermValuesSource(ValuesSourceConfig config) {
+            this.source = (ValuesSource.Numeric) config.getValuesSource();
+        }
+
+        @Override
+        public TermValues getValues(LeafReaderContext ctx) throws IOException {
+            SortedNumericDoubleValues values = source.doubleValues(ctx);
+            return doc -> {
+                if (values.advanceExact(doc)) {
+                    List<Object> objects = new ArrayList<>();
+                    int valuesCount = values.docValueCount();
+                    // Per-doc values are sorted, so duplicates are adjacent; the
+                    // i == 0 check makes the first value always accepted
+                    // regardless of the sentinel.
+                    double previous = Double.MAX_VALUE;
+                    for (int i = 0; i < valuesCount; ++i) {
+                        double val = values.nextValue();
+                        if (previous != val || i == 0) {
+                            objects.add(val);
+                            previous = val;
+                        }
+                    }
+                    return objects;
+                } else {
+                    // document has no values for this field
+                    return null;
+                }
+            };
+        }
+
+        @Override
+        public InternalMultiTerms.KeyConverter keyConverter() {
+            return InternalMultiTerms.KeyConverter.DOUBLE;
+        }
+    }
+
+    /**
+     * Base class for string and ip doc values
+     */
+    abstract static class BinaryTermValuesSource implements TermValuesSource {
+        private final ValuesSource source;
+        // Scratch buffer reused across documents to detect adjacent duplicates
+        final BytesRefBuilder previous = new BytesRefBuilder();
+
+        BinaryTermValuesSource(ValuesSourceConfig source) {
+            this.source = source.getValuesSource();
+        }
+
+        @Override
+        public TermValues getValues(LeafReaderContext ctx) throws IOException {
+            SortedBinaryDocValues values = source.bytesValues(ctx);
+            return doc -> {
+                if (values.advanceExact(doc)) {
+                    int valuesCount = values.docValueCount();
+                    List<Object> objects = new ArrayList<>(valuesCount);
+                    // SortedBinaryDocValues don't guarantee uniqueness so we
+                    // need to take care of dups
+                    previous.clear();
+                    for (int i = 0; i < valuesCount; ++i) {
+                        BytesRef bytes = values.nextValue();
+                        if (i > 0 && previous.get().equals(bytes)) {
+                            continue;
+                        }
+                        previous.copyBytes(bytes);
+                        // Deep-copy: the BytesRef returned by nextValue() is
+                        // reused by the doc values iterator.
+                        objects.add(BytesRef.deepCopyOf(bytes));
+                    }
+                    return objects;
+                } else {
+                    // document has no values for this field
+                    return null;
+                }
+            };
+        }
+    }
+
+    /**
+     * String doc values
+     */
+    static class StringTermValuesSource extends BinaryTermValuesSource {
+
+        StringTermValuesSource(ValuesSourceConfig source) {
+            super(source);
+        }
+
+        /** Keys are rendered as plain strings. */
+        @Override
+        public InternalMultiTerms.KeyConverter keyConverter() {
+            return InternalMultiTerms.KeyConverter.STRING;
+        }
+
+    }
+
+    /**
+     * IP doc values
+     */
+    static class IPTermValuesSource extends BinaryTermValuesSource {
+
+        IPTermValuesSource(ValuesSourceConfig source) {
+            super(source);
+        }
+
+        /** Keys are rendered via the IP-specific converter. */
+        @Override
+        public InternalMultiTerms.KeyConverter keyConverter() {
+            return InternalMultiTerms.KeyConverter.IP;
+        }
+    }
+
+}

+ 1 - 1
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java

@@ -85,7 +85,7 @@ public class TopMetricsAggregationBuilder extends AbstractAggregationBuilder<Top
                 ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);
         PARSER.declareInt(optionalConstructorArg(), SIZE_FIELD);
         ContextParser<Void, MultiValuesSourceFieldConfig.Builder> metricParser =
-            MultiValuesSourceFieldConfig.parserBuilder(true, false, false);
+            MultiValuesSourceFieldConfig.parserBuilder(true, false, false, false);
         PARSER.declareObjectArray(constructorArg(), (p, n) -> metricParser.parse(p, null).build(), METRIC_FIELD);
     }
 

+ 2 - 2
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregationBuilder.java

@@ -46,8 +46,8 @@ public class TTestAggregationBuilder extends MultiValuesSourceAggregationBuilder
 
     static {
         MultiValuesSourceParseHelper.declareCommon(PARSER, true, ValueType.NUMERIC);
-        MultiValuesSourceParseHelper.declareField(A_FIELD.getPreferredName(), PARSER, true, false, true);
-        MultiValuesSourceParseHelper.declareField(B_FIELD.getPreferredName(), PARSER, true, false, true);
+        MultiValuesSourceParseHelper.declareField(A_FIELD.getPreferredName(), PARSER, true, false, true, false);
+        MultiValuesSourceParseHelper.declareField(B_FIELD.getPreferredName(), PARSER, true, false, true, false);
         PARSER.declareString(TTestAggregationBuilder::testType, TYPE_FIELD);
         PARSER.declareInt(TTestAggregationBuilder::tails, TAILS_FIELD);
     }

+ 1 - 0
x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsStatsActionNodeResponseTests.java

@@ -47,6 +47,7 @@ public class AnalyticsStatsActionNodeResponseTests extends AbstractWireSerializi
         assertThat(AnalyticsStatsAction.Item.MOVING_PERCENTILES.ordinal(), equalTo(i++));
         assertThat(AnalyticsStatsAction.Item.NORMALIZE.ordinal(), equalTo(i++));
         assertThat(AnalyticsStatsAction.Item.RATE.ordinal(), equalTo(i++));
+        assertThat(AnalyticsStatsAction.Item.MULTI_TERMS.ordinal(), equalTo(i++));
         // Please add tests for newly added items here
         assertThat(AnalyticsStatsAction.Item.values().length, equalTo(i));
     }

+ 332 - 0
x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTermsTests.java

@@ -0,0 +1,332 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.analytics.multiterms;
+
+import static org.elasticsearch.search.DocValueFormat.UNSIGNED_LONG_SHIFTED;
+import static org.elasticsearch.xpack.analytics.multiterms.InternalMultiTerms.KeyConverter.DOUBLE;
+import static org.elasticsearch.xpack.analytics.multiterms.InternalMultiTerms.KeyConverter.LONG;
+import static org.elasticsearch.xpack.analytics.multiterms.InternalMultiTerms.KeyConverter.UNSIGNED_LONG;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasSize;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.common.util.MockBigArrays;
+import org.elasticsearch.common.util.MockPageCacheRecycler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.BucketOrder;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.ParsedAggregation;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.InternalAggregationTestCase;
+import org.elasticsearch.xpack.analytics.AnalyticsPlugin;
+
+/**
+ * Wire serialization, reduction and mutation tests for {@link InternalMultiTerms}.
+ */
+public class InternalMultiTermsTests extends InternalAggregationTestCase<InternalMultiTerms> {
+
+    @Override
+    protected SearchPlugin registerPlugin() {
+        return new AnalyticsPlugin();
+    }
+
+    static BucketOrder randomBucketOrder() {
+        return randomBucketOrder(true);
+    }
+
+    // Compound orders are only generated at the top level (includeCompound)
+    // so they are never nested inside another compound order.
+    private static BucketOrder randomBucketOrder(boolean includeCompound) {
+        switch (randomInt(includeCompound ? 4 : 3)) {
+            case 0:
+                return BucketOrder.key(randomBoolean());
+            case 1:
+                return BucketOrder.count(randomBoolean());
+            case 2:
+                return BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean());
+            case 3:
+                return BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean());
+            case 4:
+                List<BucketOrder> orders = new ArrayList<>();
+                int numOrders = randomIntBetween(2, 5);
+                for (int i = 0; i < numOrders; i++) {
+                    orders.add(randomBucketOrder(false));
+                }
+                return BucketOrder.compound(orders);
+            default:
+                fail();
+        }
+        return null;
+    }
+
+    private List<DocValueFormat> randomFormats(int size) {
+        return randomList(size, size, InternalAggregationTestCase::randomNumericDocValueFormat);
+    }
+
+    private List<InternalMultiTerms.KeyConverter> randomKeyConverters(int size) {
+        return randomList(size, size, () -> randomFrom(InternalMultiTerms.KeyConverter.values()));
+    }
+
+    private List<InternalMultiTerms.Bucket> randomBuckets() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    protected InternalMultiTerms createTestInstance(String name, Map<String, Object> metadata) {
+        // formats and key converters are per-term, so both lists share termSize
+        int termSize = randomIntBetween(1, 10);
+        return new InternalMultiTerms(
+            name,
+            randomBucketOrder(),
+            randomBucketOrder(),
+            randomIntBetween(1, 1000),
+            randomIntBetween(0, 1000),
+            randomIntBetween(1, 1000),
+            randomBoolean(),
+            randomNonNegativeLong(),
+            randomBuckets(),
+            randomNonNegativeLong(),
+            randomFormats(termSize),
+            randomKeyConverters(termSize),
+            metadata
+        );
+    }
+
+    @Override
+    protected List<InternalMultiTerms> randomResultsToReduce(String name, int size) {
+        List<InternalMultiTerms> terms = new ArrayList<>();
+        // All shard results share the same key-ascending reduce order so the
+        // reduction can merge the bucket lists consistently.
+        BucketOrder reduceOrder = BucketOrder.key(true);
+        BucketOrder order = BucketOrder.key(true);
+        int requiredSize = 10;
+        long minDocCount = 1;
+        int shardSize = 10;
+        boolean showTermDocCountError = randomBoolean();
+        int fieldCount = randomIntBetween(1, 10);
+        List<DocValueFormat> formats = randomFormats(fieldCount);
+        List<InternalMultiTerms.KeyConverter> keyConverters = Collections.nCopies(fieldCount, LONG);
+        // Build a shared pool of distinct keys; each shard result draws a subset.
+        List<List<Object>> bucketKeys = new ArrayList<>();
+        for (int i = 0; i < shardSize; i++) {
+            List<Object> key;
+            do {
+                key = randomList(fieldCount, fieldCount, ESTestCase::randomLong);
+            } while (bucketKeys.contains(key));
+            bucketKeys.add(key);
+        }
+
+        for (int i = 0; i < size; i++) {
+            long otherDocCount = randomLongBetween(0, Long.MAX_VALUE / size);
+            List<InternalMultiTerms.Bucket> bucketList = new ArrayList<>();
+            int numberOfBuckets = randomIntBetween(0, shardSize);
+            // Buckets must be emitted in the declared (key) reduce order.
+            List<List<Object>> visitedKeys = new ArrayList<>(randomSubsetOf(numberOfBuckets, bucketKeys));
+            visitedKeys.sort(InternalMultiTerms.TERMS_COMPARATOR);
+            for (int j = 0; j < numberOfBuckets; j++) {
+                long docCount = randomLongBetween(0, Long.MAX_VALUE / (size * numberOfBuckets));
+                long docCountError = showTermDocCountError ? randomLongBetween(0, Long.MAX_VALUE / (size * numberOfBuckets)) : -1;
+                bucketList.add(
+                    new InternalMultiTerms.Bucket(
+                        visitedKeys.get(j),
+                        docCount,
+                        InternalAggregations.EMPTY,
+                        showTermDocCountError,
+                        docCountError,
+                        formats,
+                        keyConverters
+                    )
+                );
+            }
+            long docErrorCount = -1;
+            terms.add(
+                new InternalMultiTerms(
+                    name,
+                    reduceOrder,
+                    order,
+                    requiredSize,
+                    minDocCount,
+                    shardSize,
+                    showTermDocCountError,
+                    otherDocCount,
+                    bucketList,
+                    docErrorCount,
+                    formats,
+                    keyConverters,
+                    null
+                )
+            );
+        }
+        return terms;
+    }
+
+    @Override
+    protected void assertReduced(InternalMultiTerms reduced, List<InternalMultiTerms> inputs) {
+        long otherDocExpected = inputs.stream().mapToLong(a -> a.otherDocCount).sum();
+        assertEquals(otherDocExpected, reduced.otherDocCount);
+
+        // Each reduced bucket's count must equal the sum of that key's counts
+        // across all inputs.
+        Map<List<Object>, Long> bucketCounts = new HashMap<>();
+        for (InternalMultiTerms input : inputs) {
+            for (InternalMultiTerms.Bucket bucket : input.buckets) {
+                List<Object> key = bucket.getKey();
+                bucketCounts.put(key, bucketCounts.getOrDefault(key, 0L) + bucket.docCount);
+            }
+        }
+        for (InternalMultiTerms.Bucket bucket : reduced.buckets) {
+            List<Object> key = bucket.getKey();
+            assertThat(bucketCounts.keySet(), hasItem(equalTo(key)));
+            assertThat(bucketCounts.get(key), equalTo(bucket.docCount));
+        }
+    }
+
+    @Override
+    protected void assertFromXContent(InternalMultiTerms min, ParsedAggregation parsedAggregation) {
+        // There is no ParsedMultiTerms yet so we cannot test it here
+    }
+
+    @Override
+    protected InternalMultiTerms mutateInstance(InternalMultiTerms instance) {
+        String name = instance.getName();
+        Map<String, Object> metadata = instance.getMetadata();
+        BucketOrder order = instance.order;
+        switch (between(0, 2)) {
+            case 0:
+                name += randomAlphaOfLength(5);
+                break;
+            case 1:
+                order = randomValueOtherThan(order, InternalMultiTermsTests::randomBucketOrder);
+                break;
+            case 2:
+                if (metadata == null) {
+                    metadata = new HashMap<>(1);
+                } else {
+                    metadata = new HashMap<>(instance.getMetadata());
+                }
+                metadata.put(randomAlphaOfLength(15), randomInt());
+                break;
+            default:
+                throw new AssertionError("Illegal randomisation branch");
+        }
+        // NOTE(review): the mutated "order" is passed as the second constructor
+        // argument here, while MultiTermsAggregator.buildResult passes reduceOrder
+        // second and order third — verify these argument positions match the
+        // InternalMultiTerms constructor.
+        return new InternalMultiTerms(
+            name,
+            order,
+            instance.reduceOrder,
+            instance.requiredSize,
+            instance.minDocCount,
+            instance.shardSize,
+            instance.showTermDocCountError,
+            instance.otherDocCount,
+            instance.buckets,
+            instance.docCountError,
+            instance.formats,
+            instance.keyConverters,
+            metadata
+        );
+    }
+
+    @Override
+    protected List<NamedXContentRegistry.Entry> getNamedXContents() {
+        return CollectionUtils.appendToCopy(
+            super.getNamedXContents(),
+            new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(MultiTermsAggregationBuilder.NAME), (p, c) -> {
+                assumeTrue("There is no ParsedMultiTerms yet", false);
+                return null;
+            })
+        );
+    }
+
+    public void testKeyConverters() {
+        // Shifted unsigned_long keys must convert to their unsigned double value,
+        // including values above Long.MAX_VALUE.
+        assertThat(
+            UNSIGNED_LONG.toDouble(UNSIGNED_LONG_SHIFTED, UNSIGNED_LONG_SHIFTED.parseLong("123", false, () -> 0L)),
+            closeTo(123.0, 0.0001)
+        );
+        assertThat(
+            UNSIGNED_LONG.toDouble(UNSIGNED_LONG_SHIFTED, UNSIGNED_LONG_SHIFTED.parseLong("9223372036854775813", false, () -> 0L)),
+            closeTo(9223372036854775813.0, 0.0001)
+        );
+    }
+
+    // When shards disagree on a key's type (unsigned_long vs long vs double) the
+    // keys are promoted to double before comparison/merging during reduce.
+    public void testReduceWithDoublePromotion() {
+        MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
+        ScriptService mockScriptService = mockScriptService();
+        List<DocValueFormat> formats1 = List.of(DocValueFormat.RAW, UNSIGNED_LONG_SHIFTED, DocValueFormat.RAW);
+        List<DocValueFormat> formats2 = List.of(DocValueFormat.RAW, DocValueFormat.RAW, DocValueFormat.RAW);
+        List<InternalMultiTerms.KeyConverter> keyConverters1 = List.of(LONG, UNSIGNED_LONG, LONG);
+        List<InternalMultiTerms.KeyConverter> keyConverters2 = List.of(LONG, LONG, DOUBLE);
+        BucketOrder order = BucketOrder.compound(BucketOrder.count(false), BucketOrder.key(true));
+        InternalAggregations subs = InternalAggregations.EMPTY;
+
+        InternalMultiTerms terms1 = new InternalMultiTerms(
+            "test",
+            order,
+            order,
+            10,
+            1,
+            10,
+            false,
+            0,
+            List.of(
+                new InternalMultiTerms.Bucket(List.of(3L, ul("9223372036854775813"), 3L), 1, subs, false, 0, formats1, keyConverters1),
+                new InternalMultiTerms.Bucket(List.of(4L, ul("9223372036854775805"), 1L), 1, subs, false, 0, formats1, keyConverters1),
+                new InternalMultiTerms.Bucket(List.of(4L, ul("9223372036854775805"), 1L), 1, subs, false, 0, formats1, keyConverters1),
+                new InternalMultiTerms.Bucket(List.of(4L, ul("9223372036854775814"), 1L), 1, subs, false, 0, formats1, keyConverters1)
+            ),
+            0,
+            formats1,
+            keyConverters1,
+            null
+        );
+
+        InternalMultiTerms terms2 = new InternalMultiTerms(
+            "test",
+            order,
+            order,
+            10,
+            1,
+            10,
+            false,
+            0,
+            List.of(
+                new InternalMultiTerms.Bucket(List.of(3L, 9223372036854775803L, 3.0), 1, subs, false, 0, formats2, keyConverters2),
+                new InternalMultiTerms.Bucket(List.of(4L, 9223372036854775804L, 4.0), 1, subs, false, 0, formats2, keyConverters2),
+                new InternalMultiTerms.Bucket(List.of(4L, 9223372036854775805L, 4.0), 1, subs, false, 0, formats2, keyConverters2),
+                new InternalMultiTerms.Bucket(List.of(4L, 9223372036854775805L, 4.0), 1, subs, false, 0, formats2, keyConverters2)
+
+            ),
+            0,
+            formats2,
+            keyConverters2,
+            null
+        );
+        InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forPartialReduction(
+            bigArrays,
+            mockScriptService,
+            () -> PipelineAggregator.PipelineTree.EMPTY
+        );
+
+        InternalMultiTerms result = (InternalMultiTerms) terms1.reduce(List.of(terms1, terms2), context);
+        assertThat(result.buckets, hasSize(3));
+        assertThat(result.buckets.get(0).getKeyAsString(), equalTo("4|9.223372036854776E18|4.0"));
+        assertThat(result.buckets.get(0).getDocCount(), equalTo(3L));
+        assertThat(result.buckets.get(1).getKeyAsString(), equalTo("4|9.223372036854776E18|1.0"));
+        assertThat(result.buckets.get(1).getDocCount(), equalTo(3L));
+        assertThat(result.buckets.get(2).getKeyAsString(), equalTo("3|9.223372036854776E18|3.0"));
+        assertThat(result.buckets.get(2).getDocCount(), equalTo(2L));
+    }
+
+    // Parses the given decimal string into its shifted unsigned_long representation.
+    long ul(String val) {
+        return UNSIGNED_LONG_SHIFTED.parseLong(val, false, () -> 0L);
+    }
+}

+ 128 - 0
x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java

@@ -0,0 +1,128 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.analytics.multiterms;
+
+import static org.elasticsearch.test.InternalAggregationTestCase.randomNumericDocValueFormat;
+import static org.elasticsearch.xpack.analytics.multiterms.InternalMultiTermsTests.randomBucketOrder;
+import static org.hamcrest.Matchers.hasSize;
+
+import java.io.IOException;
+import java.time.ZoneId;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.BaseAggregationBuilder;
+import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig;
+import org.elasticsearch.search.aggregations.support.ValueType;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.junit.Before;
+
+/**
+ * Serialization and XContent parsing tests for {@link MultiTermsAggregationBuilder}.
+ */
+public class MultiTermsAggregationBuilderTests extends AbstractSerializingTestCase<MultiTermsAggregationBuilder> {
+    String aggregationName;
+
+    @Before
+    public void setupName() {
+        aggregationName = randomAlphaOfLength(10);
+    }
+
+    @Override
+    protected MultiTermsAggregationBuilder doParseInstance(XContentParser parser) throws IOException {
+        assertSame(XContentParser.Token.START_OBJECT, parser.nextToken());
+        AggregatorFactories.Builder parsed = AggregatorFactories.parseAggregators(parser);
+        assertThat(parsed.getAggregatorFactories(), hasSize(1));
+        assertThat(parsed.getPipelineAggregatorFactories(), hasSize(0));
+        MultiTermsAggregationBuilder agg = (MultiTermsAggregationBuilder) parsed.getAggregatorFactories().iterator().next();
+        assertNull(parser.nextToken());
+        assertNotNull(agg);
+        return agg;
+    }
+
+    // Builds a random per-term field config; each optional attribute is
+    // independently present or absent.
+    static MultiValuesSourceFieldConfig randomTermConfig() {
+        String field = randomAlphaOfLength(10);
+        Object missing = randomBoolean() ? randomAlphaOfLength(10) : null;
+        ZoneId timeZone = randomBoolean() ? randomZone() : null;
+        String format = randomBoolean() ? randomNumericDocValueFormat().toString() : null;
+        ValueType userValueTypeHint = randomBoolean()
+            ? randomFrom(ValueType.STRING, ValueType.DOUBLE, ValueType.LONG, ValueType.DATE, ValueType.IP, ValueType.BOOLEAN)
+            : null;
+        return new MultiValuesSourceFieldConfig.Builder().setFieldName(field)
+                .setMissing(missing)
+                .setScript(null)
+                .setTimeZone(timeZone)
+                .setFormat(format)
+                .setUserValueTypeHint(userValueTypeHint)
+                .build();
+    }
+
+    @Override
+    protected MultiTermsAggregationBuilder createTestInstance() {
+        MultiTermsAggregationBuilder aggregationBuilder = new MultiTermsAggregationBuilder(aggregationName);
+
+        int termCount = randomIntBetween(2, 10);
+        List<MultiValuesSourceFieldConfig> terms = new ArrayList<>();
+        for (int i = 0; i < termCount; i++) {
+            terms.add(randomTermConfig());
+        }
+        aggregationBuilder.terms(terms);
+        // NOTE(review): showTermDocCountError sits behind two nested
+        // randomBoolean() calls and collectMode is randomized twice below —
+        // looks like a copy-paste leftover; confirm intent.
+        if (randomBoolean()) {
+            if (randomBoolean()) {
+                aggregationBuilder.showTermDocCountError(randomBoolean());
+            }
+        }
+        if (randomBoolean()) {
+            aggregationBuilder.size(randomIntBetween(1, 1000));
+        }
+        if (randomBoolean()) {
+            aggregationBuilder.shardSize(randomIntBetween(1, 1000));
+        }
+        if (randomBoolean()) {
+            aggregationBuilder.order(randomBucketOrder());
+        }
+        if (randomBoolean()) {
+            aggregationBuilder.collectMode(randomFrom(Aggregator.SubAggCollectionMode.values()));
+        }
+        if (randomBoolean()) {
+            aggregationBuilder.collectMode(randomFrom(Aggregator.SubAggCollectionMode.values()));
+        }
+        return aggregationBuilder;
+    }
+
+    @Override
+    protected Writeable.Reader<MultiTermsAggregationBuilder> instanceReader() {
+        return MultiTermsAggregationBuilder::new;
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables());
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        // Register the multi_terms parser on top of the default search XContent
+        // entries so doParseInstance can round-trip the builder.
+        List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>();
+        namedXContent.add(
+            new NamedXContentRegistry.Entry(
+                BaseAggregationBuilder.class,
+                new ParseField(MultiTermsAggregationBuilder.NAME),
+                (p, n) -> MultiTermsAggregationBuilder.PARSER.apply(p, (String) n)
+            )
+        );
+        namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
+        return new NamedXContentRegistry(namedXContent);
+    }
+}

+ 667 - 0
x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregatorTests.java

@@ -0,0 +1,667 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.analytics.multiterms;
+
+import static org.elasticsearch.xpack.analytics.multiterms.MultiTermsAggregationBuilderTests.randomTermConfig;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatDocValuesField;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.time.DateFormatter;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.index.mapper.DateFieldMapper;
+import org.elasticsearch.index.mapper.KeywordFieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.script.MockScriptEngine;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptEngine;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptType;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.BucketOrder;
+import org.elasticsearch.search.aggregations.metrics.InternalMax;
+import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
+import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig;
+import org.elasticsearch.search.aggregations.support.ValueType;
+import org.elasticsearch.search.aggregations.support.ValuesSourceType;
+import org.elasticsearch.search.lookup.LeafDocLookup;
+import org.elasticsearch.xpack.analytics.AnalyticsPlugin;
+
+public class MultiTermsAggregatorTests extends AggregatorTestCase {
+
+    /**
+     * Name of the mock script registered in {@link #getMockScriptService()}: it reads the doc value
+     * of the field named by the {@code fieldname} script param and returns that value plus one.
+     * (The previous comment claimed it returned {@code _value}; the implementation below does not.)
+     */
+    public static final String ADD_ONE_SCRIPT = "add_one";
+
+    // Field names shared by all tests in this class.
+    public static final String DATE_FIELD = "tVal";
+    public static final String INT_FIELD = "iVal";
+    public static final String FLOAT_FIELD = "fVal";
+    public static final String KEYWORD_FIELD = "kVal";
+    /**
+     * Builds a minimal multi_terms aggregation for the generic per-field-type test harness.
+     * The same field is used twice because multi_terms requires at least two term sources.
+     */
+    @Override
+    protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) {
+        logger.info(fieldType);
+        return new MultiTermsAggregationBuilder("my_terms").terms(
+            List.of(
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(fieldName).build(),
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(fieldName).build()
+            )
+        );
+    }
+
+    /** Value source types the multi_terms aggregation declares support for in the type test harness. */
+    @Override
+    protected List<ValuesSourceType> getSupportedValuesSourceTypes() {
+        return List.of(
+            CoreValuesSourceType.NUMERIC,
+            CoreValuesSourceType.DATE,
+            CoreValuesSourceType.BOOLEAN,
+            CoreValuesSourceType.KEYWORD,
+            CoreValuesSourceType.IP
+        );
+    }
+
+    /**
+     * Script service providing {@link #ADD_ONE_SCRIPT}: looks up the doc values of the field named
+     * by the {@code fieldname} param and returns the first value incremented by one (as a double).
+     */
+    @Override
+    protected ScriptService getMockScriptService() {
+        Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
+
+        scripts.put(ADD_ONE_SCRIPT, vars -> {
+            LeafDocLookup leafDocLookup = (LeafDocLookup) vars.get("doc");
+            String fieldname = (String) vars.get("fieldname");
+            ScriptDocValues<?> scriptDocValues = leafDocLookup.get(fieldname);
+            return ((Number) scriptDocValues.get(0)).doubleValue() + 1.0;
+        });
+
+        MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, scripts, Collections.emptyMap());
+        Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine);
+
+        return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS);
+    }
+
+    /**
+     * Indexes five docs with keyword/int/float terms and checks the composite buckets.
+     * The assertions show buckets returned highest-doc-count first (the repeated key
+     * ("a", 3, 1.0) with two docs leads), followed by the single-doc buckets.
+     */
+    public void testIntegersFloatsAndStrings() throws IOException {
+        testCase(new MatchAllDocsQuery(), new String[] { KEYWORD_FIELD, INT_FIELD, FLOAT_FIELD }, null, iw -> {
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 1),
+                    new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 2),
+                    new FloatDocValuesField(FLOAT_FIELD, 2.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            // Duplicate of the first document's key — makes ("a", 3, 1.0) the top bucket.
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+        }, h -> {
+            assertThat(h.getBuckets(), hasSize(4));
+            assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(3L), equalTo(1.0)));
+            assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+            assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(1L), equalTo(1.0)));
+            assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("a"), equalTo(2L), equalTo(2.0)));
+            assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(3).getKey(), contains(equalTo("b"), equalTo(3L), equalTo(1.0)));
+            assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L));
+        });
+    }
+
+    /**
+     * Without {@code missing} configured, documents that lack any one of the three term fields
+     * produce no bucket: only the docs carrying all of keyword, int and float are counted,
+     * leaving just two buckets.
+     */
+    public void testNullFields() throws IOException {
+        testCase(new MatchAllDocsQuery(), new String[] { KEYWORD_FIELD, INT_FIELD, FLOAT_FIELD }, null, iw -> {
+            // The next three docs each miss one of the term fields and should be ignored.
+            iw.addDocument(
+                List.of(new FloatDocValuesField(FLOAT_FIELD, 1.0f), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+            );
+            iw.addDocument(List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b"))));
+            iw.addDocument(List.of(new NumericDocValuesField(INT_FIELD, 1), new FloatDocValuesField(FLOAT_FIELD, 1.0f)));
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 2),
+                    new FloatDocValuesField(FLOAT_FIELD, 2.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            // Doc with none of the term fields at all — also ignored.
+            iw.addDocument(List.of(new NumericDocValuesField("wrong_val", 3)));
+        }, h -> {
+            assertThat(h.getBuckets(), hasSize(2));
+            assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(3L), equalTo(1.0)));
+            assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+            assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(2L), equalTo(2.0)));
+            assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+        });
+    }
+
+    /**
+     * Same documents as {@link #testNullFields()}, but with {@code missing} values configured
+     * ("z" / 0 / -1.0) — so every doc with at least one mapped term field contributes a bucket,
+     * yielding six buckets instead of two.
+     */
+    public void testMissingFields() throws IOException {
+        testCase(
+            new MatchAllDocsQuery(),
+            List.of(
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD).setMissing("z").build(),
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(INT_FIELD).setMissing(0).build(),
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(FLOAT_FIELD).setMissing(-1.0f).build()
+            ),
+            null,
+            iw -> {
+                iw.addDocument(
+                    List.of(new FloatDocValuesField(FLOAT_FIELD, 1.0f), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")))
+                );
+                iw.addDocument(List.of(new NumericDocValuesField(INT_FIELD, 1), new FloatDocValuesField(FLOAT_FIELD, 1.0f)));
+                iw.addDocument(
+                    List.of(
+                        new NumericDocValuesField(INT_FIELD, 2),
+                        new FloatDocValuesField(FLOAT_FIELD, 2.0f),
+                        new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                    )
+                );
+                iw.addDocument(
+                    List.of(
+                        new NumericDocValuesField(INT_FIELD, 3),
+                        new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                        new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                    )
+                );
+                iw.addDocument(
+                    List.of(
+                        new NumericDocValuesField(INT_FIELD, 3),
+                        new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                        new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                    )
+                );
+                // Doc with none of the three term fields: all three missing values apply -> ("z", 0, -1.0).
+                iw.addDocument(List.of(new NumericDocValuesField("wrong_val", 3)));
+            },
+            h -> {
+                assertThat(h.getBuckets(), hasSize(6));
+                assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(3L), equalTo(1.0)));
+                assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+                assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(0L), equalTo(1.0)));
+                assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("a"), equalTo(2L), equalTo(2.0)));
+                assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(3).getKey(), contains(equalTo("b"), equalTo(3L), equalTo(-1.0)));
+                assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(4).getKey(), contains(equalTo("z"), equalTo(0L), equalTo(-1.0)));
+                assertThat(h.getBuckets().get(4).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(5).getKey(), contains(equalTo("z"), equalTo(1L), equalTo(1.0)));
+                assertThat(h.getBuckets().get(5).getDocCount(), equalTo(1L));
+            }
+        );
+    }
+
+    /**
+     * Verifies that {@link SortedNumericDocValuesField} (as opposed to plain numeric doc values)
+     * is read correctly for a numeric term source.
+     */
+    public void testSortedNumericDocValues() throws IOException {
+        testCase(new MatchAllDocsQuery(), new String[] { KEYWORD_FIELD, INT_FIELD }, null, iw -> {
+            iw.addDocument(
+                List.of(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new SortedNumericDocValuesField(INT_FIELD, 1))
+            );
+            iw.addDocument(
+                List.of(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new SortedNumericDocValuesField(INT_FIELD, 2))
+            );
+            iw.addDocument(
+                List.of(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new SortedNumericDocValuesField(INT_FIELD, 3))
+            );
+            iw.addDocument(
+                List.of(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new SortedNumericDocValuesField(INT_FIELD, 3))
+            );
+        }, h -> {
+            assertThat(h.getBuckets(), hasSize(3));
+            assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("b"), equalTo(3L)));
+            assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+            assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(1L)));
+            assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("a"), equalTo(2L)));
+            assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+        });
+    }
+
+    /**
+     * When a document has multiple values per term field, each doc produces the cartesian product
+     * of its values: two docs with 2x2 values yield 8 combinations, 7 of which are distinct —
+     * ("b", 2) appears in both docs and becomes the 2-doc top bucket.
+     */
+    public void testMultiValues() throws IOException {
+        testCase(new MatchAllDocsQuery(), new String[] { KEYWORD_FIELD, INT_FIELD }, null, iw -> {
+            iw.addDocument(
+                List.of(
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")),
+                    new SortedNumericDocValuesField(INT_FIELD, 1),
+                    new SortedNumericDocValuesField(INT_FIELD, 2)
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("c")),
+                    new SortedNumericDocValuesField(INT_FIELD, 2),
+                    new SortedNumericDocValuesField(INT_FIELD, 3)
+                )
+            );
+        }, h -> {
+            assertThat(h.getBuckets(), hasSize(7));
+            assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("b"), equalTo(2L)));
+            assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+            assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(1L)));
+            assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("a"), equalTo(2L)));
+            assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(3).getKey(), contains(equalTo("b"), equalTo(1L)));
+            assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(4).getKey(), contains(equalTo("b"), equalTo(3L)));
+            assertThat(h.getBuckets().get(4).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(5).getKey(), contains(equalTo("c"), equalTo(2L)));
+            assertThat(h.getBuckets().get(5).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(6).getKey(), contains(equalTo("c"), equalTo(3L)));
+            assertThat(h.getBuckets().get(6).getDocCount(), equalTo(1L));
+        });
+    }
+
+    /**
+     * Uses {@link #ADD_ONE_SCRIPT} as the middle term source: each bucket key contains the
+     * keyword, the scripted value (int field + 1) and the raw int field, so the second key
+     * component is always one greater than the third.
+     */
+    public void testScripts() throws IOException {
+        testCase(
+            new MatchAllDocsQuery(),
+            List.of(
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD).build(),
+                new MultiValuesSourceFieldConfig.Builder().setScript(
+                    new Script(ScriptType.INLINE, MockScriptEngine.NAME, ADD_ONE_SCRIPT, Collections.singletonMap("fieldname", INT_FIELD))
+                ).setUserValueTypeHint(ValueType.LONG).build(),
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(INT_FIELD).build()
+            ),
+            null,
+            iw -> {
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 1), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 2), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+            },
+            h -> {
+                assertThat(h.getBuckets(), hasSize(4));
+                assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(4L), equalTo(3L)));
+                assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+                assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(2L), equalTo(1L)));
+                assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("a"), equalTo(3L), equalTo(2L)));
+                assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(3).getKey(), contains(equalTo("b"), equalTo(4L), equalTo(3L)));
+                assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L));
+            }
+        );
+    }
+
+    /**
+     * Runs the aggregation under a {@link TermQuery} on an indexed (StringField) copy of the
+     * keyword: the doc whose indexed value is "b" is filtered out of the query, so only the
+     * four "a" docs are aggregated.
+     */
+    public void testFilter() throws IOException {
+        testCase(new TermQuery(new Term(KEYWORD_FIELD, "a")), new String[] { KEYWORD_FIELD, INT_FIELD }, null, iw -> {
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")),
+                    new StringField(KEYWORD_FIELD, "a", Field.Store.NO)
+                )
+            );
+            // Indexed value "b" — excluded by the TermQuery even though doc values say "a".
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")),
+                    new StringField(KEYWORD_FIELD, "b", Field.Store.NO)
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 1),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")),
+                    new StringField(KEYWORD_FIELD, "a", Field.Store.NO)
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 2),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")),
+                    new StringField(KEYWORD_FIELD, "a", Field.Store.NO)
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")),
+                    new StringField(KEYWORD_FIELD, "a", Field.Store.NO)
+                )
+            );
+        }, h -> {
+            assertThat(h.getBuckets(), hasSize(3));
+            assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(3L)));
+            assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+            assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(1L)));
+            assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+            assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("a"), equalTo(2L)));
+            assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+        });
+    }
+
+    /**
+     * Orders buckets by a sub-aggregation: ascending on the {@code max_float} max-agg, so buckets
+     * come back in increasing max value (2.0, 3.0, 4.0, 5.0) rather than by doc count.
+     */
+    public void testSort() throws IOException {
+        testCase(new MatchAllDocsQuery(), new String[] { INT_FIELD, KEYWORD_FIELD }, b -> {
+            b.order(BucketOrder.aggregation("max_float", true));
+            b.subAggregation(new MaxAggregationBuilder("max_float").field(FLOAT_FIELD));
+        }, iw -> {
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 1.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 2.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 1),
+                    new FloatDocValuesField(FLOAT_FIELD, 3.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 2),
+                    new FloatDocValuesField(FLOAT_FIELD, 4.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+            iw.addDocument(
+                List.of(
+                    new NumericDocValuesField(INT_FIELD, 3),
+                    new FloatDocValuesField(FLOAT_FIELD, 5.0f),
+                    new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a"))
+                )
+            );
+        }, h -> {
+            assertThat(h.getBuckets(), hasSize(4));
+
+            assertThat(h.getBuckets().get(0).getKey(), contains(equalTo(3L), equalTo("b")));
+            assertThat(h.getBuckets().get(0).getDocCount(), equalTo(1L));
+            assertThat(((InternalMax) (h.getBuckets().get(0).getAggregations().get("max_float"))).value(), closeTo(2.0, 0.00001));
+
+            assertThat(h.getBuckets().get(1).getKey(), contains(equalTo(1L), equalTo("a")));
+            assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+            assertThat(((InternalMax) (h.getBuckets().get(1).getAggregations().get("max_float"))).value(), closeTo(3.0, 0.00001));
+
+            assertThat(h.getBuckets().get(2).getKey(), contains(equalTo(2L), equalTo("a")));
+            assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+            assertThat(((InternalMax) (h.getBuckets().get(2).getAggregations().get("max_float"))).value(), closeTo(4.0, 0.00001));
+
+            // The (3, "a") bucket covers two docs (floats 1.0 and 5.0); its max of 5.0 sorts it last.
+            assertThat(h.getBuckets().get(3).getKey(), contains(equalTo(3L), equalTo("a")));
+            assertThat(h.getBuckets().get(3).getDocCount(), equalTo(2L));
+            assertThat(((InternalMax) (h.getBuckets().get(3).getAggregations().get("max_float"))).value(), closeTo(5.0, 0.00001));
+        });
+    }
+
+    /**
+     * Applies a numeric format ("0000", zero-padded) to the int term; the assertions show the
+     * composite key string joins the formatted components with '|' (e.g. "a|0003").
+     */
+    public void testFormatter() throws IOException {
+        testCase(
+            new MatchAllDocsQuery(),
+            List.of(
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD).build(),
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(INT_FIELD).setFormat("0000").build()
+            ),
+            null,
+            iw -> {
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 1), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 2), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+                iw.addDocument(
+                    List.of(new NumericDocValuesField(INT_FIELD, 3), new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")))
+                );
+            },
+            h -> {
+                assertThat(h.getBuckets(), hasSize(4));
+                assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("a|0003"));
+                assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+                assertThat(h.getBuckets().get(1).getKeyAsString(), equalTo("a|0001"));
+                assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(2).getKeyAsString(), equalTo("a|0002"));
+                assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(3).getKeyAsString(), equalTo("b|0003"));
+                assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L));
+            }
+        );
+    }
+
+    /**
+     * Uses a date field as the first term: the key string renders the date through the field's
+     * {@code strict_date} formatter (see {@link #dateFieldType(String)}), e.g. "2020-01-01|0003".
+     */
+    public void testDates() throws IOException {
+        testCase(
+            new MatchAllDocsQuery(),
+            List.of(
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(DATE_FIELD).build(),
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(INT_FIELD).setFormat("0000").build()
+            ),
+            null,
+            iw -> {
+                iw.addDocument(docWithDate("2020-01-01", new NumericDocValuesField(INT_FIELD, 3)));
+                iw.addDocument(docWithDate("2020-01-01", new NumericDocValuesField(INT_FIELD, 4)));
+                iw.addDocument(docWithDate("2020-01-01", new NumericDocValuesField(INT_FIELD, 3)));
+                iw.addDocument(docWithDate("2020-01-02", new NumericDocValuesField(INT_FIELD, 5)));
+
+            },
+            h -> {
+                assertThat(h.getBuckets(), hasSize(3));
+                assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("2020-01-01|0003"));
+                assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+                assertThat(h.getBuckets().get(1).getKeyAsString(), equalTo("2020-01-01|0004"));
+                assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L));
+                assertThat(h.getBuckets().get(2).getKeyAsString(), equalTo("2020-01-02|0005"));
+                assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L));
+            }
+        );
+    }
+
+    /**
+     * With {@code minDocCount(2)}, only keys appearing in at least two documents survive:
+     * from the four docs below only "2020-01-01|0003" (two occurrences) is returned.
+     */
+    public void testMinDocCount() throws IOException {
+        testCase(
+            new MatchAllDocsQuery(),
+            List.of(
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(DATE_FIELD).build(),
+                new MultiValuesSourceFieldConfig.Builder().setFieldName(INT_FIELD).setFormat("0000").build()
+            ),
+            ab -> ab.minDocCount(2),
+            iw -> {
+                iw.addDocument(docWithDate("2020-01-01", new NumericDocValuesField(INT_FIELD, 3)));
+                iw.addDocument(docWithDate("2020-01-01", new NumericDocValuesField(INT_FIELD, 4)));
+                iw.addDocument(docWithDate("2020-01-01", new NumericDocValuesField(INT_FIELD, 3)));
+                iw.addDocument(docWithDate("2020-01-02", new NumericDocValuesField(INT_FIELD, 5)));
+
+            },
+            h -> {
+                assertThat(h.getBuckets(), hasSize(1));
+                assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("2020-01-01|0003"));
+                assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L));
+            }
+        );
+    }
+
+    /**
+     * Validation: a single term config, an empty list, or a null terms list must be rejected
+     * with an {@link IllegalArgumentException}; the message differs for null vs. too-few terms,
+     * and the single-term case adds a hint to use the regular terms aggregation instead.
+     */
+    public void testNoTerms() {
+        for (List<MultiValuesSourceFieldConfig> terms : Arrays.<List<MultiValuesSourceFieldConfig>>asList(
+            Collections.singletonList(randomTermConfig()),
+            Collections.emptyList(),
+            null
+        )) {
+
+            IllegalArgumentException ex = expectThrows(
+                IllegalArgumentException.class,
+                () -> testCase(
+                    new MatchAllDocsQuery(),
+                    terms,
+                    null,
+                    iw -> { iw.addDocument(docWithDate("2020-01-01", new NumericDocValuesField(INT_FIELD, 3))); },
+                    h -> fail("Should have thrown exception")
+                )
+            );
+            if (terms == null) {
+                assertEquals("[terms] must not be null: [my_terms]", ex.getMessage());
+            } else {
+                assertEquals(
+                    "The [terms] parameter in the aggregation [my_terms] must be present and have at least 2 fields or scripts."
+                        + (terms.isEmpty() == false ? " For a single field user terms aggregation." : ""),
+                    ex.getMessage()
+                );
+            }
+        }
+
+    }
+
+    /**
+     * Convenience overload: wraps plain field names into default (field-only)
+     * {@link MultiValuesSourceFieldConfig}s and delegates to the list-based overload.
+     */
+    private void testCase(
+        Query query,
+        String[] terms,
+        Consumer<MultiTermsAggregationBuilder> builderSetup,
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
+        Consumer<InternalMultiTerms> verify
+    ) throws IOException {
+        List<MultiValuesSourceFieldConfig> termConfigs = new ArrayList<>();
+        for (String term : terms) {
+            termConfigs.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(term).build());
+        }
+        testCase(query, termConfigs, builderSetup, buildIndex, verify);
+    }
+
+    /**
+     * Core harness: builds a multi_terms aggregation over the given term configs and runs it via
+     * {@code AggregatorTestCase.testCase} with the four field types this class uses.
+     *
+     * @param builderSetup optional customization of the builder; when null, a few settings that
+     *                     should not change results (doc-count error flag, size, shardSize) are
+     *                     randomized to widen coverage.
+     */
+    private void testCase(
+        Query query,
+        List<MultiValuesSourceFieldConfig> terms,
+        Consumer<MultiTermsAggregationBuilder> builderSetup,
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
+        Consumer<InternalMultiTerms> verify
+    ) throws IOException {
+        MappedFieldType dateType = dateFieldType(DATE_FIELD);
+        MappedFieldType intType = new NumberFieldMapper.NumberFieldType(INT_FIELD, NumberFieldMapper.NumberType.INTEGER);
+        MappedFieldType floatType = new NumberFieldMapper.NumberFieldType(FLOAT_FIELD, NumberFieldMapper.NumberType.FLOAT);
+        MappedFieldType keywordType = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD);
+        MultiTermsAggregationBuilder builder = new MultiTermsAggregationBuilder("my_terms");
+        builder.terms(terms);
+        if (builderSetup != null) {
+            builderSetup.accept(builder);
+        } else {
+            // Set some random settings that shouldn't affect most of tests
+            if (randomBoolean()) {
+                builder.showTermDocCountError(randomBoolean());
+            }
+            if (randomBoolean()) {
+                builder.shardSize(randomIntBetween(10, 200));
+            }
+            if (randomBoolean()) {
+                builder.size(randomIntBetween(10, 200));
+            }
+        }
+        testCase(builder, query, buildIndex, verify, dateType, intType, floatType, keywordType);
+    }
+
+    /** Registers the analytics plugin so the multi_terms aggregation is available to the test harness. */
+    @Override
+    protected List<SearchPlugin> getSearchPlugins() {
+        return Collections.singletonList(new AnalyticsPlugin());
+    }
+
+    /** Millisecond-resolution date field type using the {@code strict_date} (yyyy-MM-dd) format. */
+    private DateFieldMapper.DateFieldType dateFieldType(String name) {
+        return new DateFieldMapper.DateFieldType(
+            name,
+            true,
+            false,
+            true,
+            DateFormatter.forPattern("strict_date"),
+            DateFieldMapper.Resolution.MILLISECONDS,
+            null,
+            Collections.emptyMap()
+        );
+    }
+
+    /**
+     * Builds a document containing the given date (stored both as a numeric doc value and as a
+     * {@link LongPoint} on {@link #DATE_FIELD}) plus any additional fields.
+     */
+    private Iterable<IndexableField> docWithDate(String date, IndexableField... fields) {
+        List<IndexableField> indexableFields = new ArrayList<>();
+        long instant = dateFieldType(DATE_FIELD).parse(date);
+        indexableFields.add(new SortedNumericDocValuesField(DATE_FIELD, instant));
+        indexableFields.add(new LongPoint(DATE_FIELD, instant));
+        indexableFields.addAll(Arrays.asList(fields));
+        return indexableFields;
+    }
+
+}

+ 2 - 1
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java

@@ -47,7 +47,8 @@ public class AnalyticsStatsAction extends ActionType<AnalyticsStatsAction.Respon
         T_TEST,
         MOVING_PERCENTILES,
         NORMALIZE,
-        RATE;
+        RATE,
+        MULTI_TERMS;
     }
 
     public static class Request extends BaseNodesRequest<Request> implements ToXContentObject {

+ 2 - 2
x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregationBuilder.java

@@ -48,8 +48,8 @@ public class GeoLineAggregationBuilder
         ObjectParser.fromBuilder(NAME, GeoLineAggregationBuilder::new);
     static {
         MultiValuesSourceParseHelper.declareCommon(PARSER, true, ValueType.NUMERIC);
-        MultiValuesSourceParseHelper.declareField(POINT_FIELD.getPreferredName(), PARSER, true, false, false);
-        MultiValuesSourceParseHelper.declareField(SORT_FIELD.getPreferredName(), PARSER, true, false, false);
+        MultiValuesSourceParseHelper.declareField(POINT_FIELD.getPreferredName(), PARSER, true, false, false, false);
+        MultiValuesSourceParseHelper.declareField(SORT_FIELD.getPreferredName(), PARSER, true, false, false, false);
         PARSER.declareString((builder, order) -> builder.sortOrder(SortOrder.fromString(order)), ORDER_FIELD);
         PARSER.declareBoolean(GeoLineAggregationBuilder::includeSort, INCLUDE_SORT_FIELD);
         PARSER.declareInt(GeoLineAggregationBuilder::size, SIZE_FIELD);

+ 211 - 0
x-pack/plugin/src/test/resources/rest-api-spec/test/analytics/multi_terms.yml

@@ -0,0 +1,211 @@
+---
+setup:
+  - do:
+      indices.create:
+        index: test1
+        body:
+          mappings:
+            properties:
+              val:
+                type: long
+              uval:
+                type: unsigned_long
+              addr:
+                type: ip
+
+  - do:
+      indices.create:
+        index: test2
+        body:
+          mappings:
+            properties:
+              val:
+                type: integer
+              uval:
+                type: long
+              addr:
+                type: ip
+
+  - do:
+      indices.create:
+        index: test_with_child
+        body:
+          mappings:
+            properties:
+              val:
+                type: integer
+              lval:
+                type: long
+              join:
+                type: join
+                relations:
+                  data: address
+
+  - do:
+      bulk:
+        index: test1
+        refresh: true
+        body:
+          - '{"index": {}}'
+          - '{"val": 3, "uval": 9223372036854775813, "addr": "192.168.0.13"}'
+          - '{"index": {}}'
+          - '{"val": 4, "uval": 9223372036854775814, "addr": "192.168.0.4"}'
+          - '{"index": {}}'
+          - '{"val": 4, "uval": 9223372036854775805, "addr": "192.168.0.4"}'
+          - '{"index": {}}'
+          - '{"val": 4, "uval": 9223372036854775805, "addr": "192.168.0.5"}'
+
+  - do:
+      bulk:
+        index: test2
+        refresh: true
+        body:
+          - '{"index": {}}'
+          - '{"val": 3, "uval": 9223372036854775803, "addr": "192.168.0.13"}'
+          - '{"index": {}}'
+          - '{"val": 4, "uval": 9223372036854775804, "addr": "192.168.0.4"}'
+          - '{"index": {}}'
+          - '{"val": 4, "uval": 9223372036854775805, "addr": "192.168.0.4"}'
+          - '{"index": {}}'
+          - '{"val": 4, "uval": 9223372036854775805, "addr": "192.168.0.5"}'
+
+  - do:
+      bulk:
+        index: test_with_child
+        refresh: true
+        body:
+          - '{"index": {"_id": "1"}}'
+          - '{"val": 3, "lval": 4, "join": {"name": "data"}}'
+          - '{"index": {"routing": "1"}}'
+          - '{"addr": "192.168.0.13", "join": {"name": "address", "parent": "1"}}'
+          - '{"index": {"routing": "1"}}'
+          - '{"addr": "192.168.0.14", "join": {"name": "address", "parent": "1"}}'
+          - '{"index": {"_id": "2"}}'
+          - '{"val": 4, "lval": 4, "join": {"name": "data"}}'
+          - '{"index": {"routing": "2"}}'
+          - '{"addr": "192.168.0.1", "join": {"name": "address", "parent": "2"}}'
+          - '{"index": {"routing": "2"}}'
+          - '{"addr": "192.168.0.2", "join": {"name": "address", "parent": "2"}}'
+          - '{"index": {"_id": "3"}}'
+          - '{"val": 3, "lval": 4, "join": {"name": "data"}}'
+          - '{"index": {"routing": "3"}}'
+          - '{"addr": "192.168.0.3", "join": {"name": "address", "parent": "3"}}'
+          - '{"index": {"routing": "3"}}'
+          - '{"addr": "192.168.0.4", "join": {"name": "address", "parent": "3"}}'
+
+---
+"val and uval":
+  - do:
+      search:
+        size: 0
+        index: "test1"
+        body:
+          aggs:
+            m_terms:
+              multi_terms:
+                terms:
+                  - field: val
+                  - field: uval
+
+  - length: { aggregations.m_terms.buckets: 3 }
+  - match: { aggregations.m_terms.buckets.0.key_as_string: "4|9223372036854775805" }
+  - match: { aggregations.m_terms.buckets.0.doc_count: 2 }
+  - match: { aggregations.m_terms.buckets.1.key_as_string: "3|9223372036854775813" }
+  - match: { aggregations.m_terms.buckets.1.doc_count: 1 }
+  - match: { aggregations.m_terms.buckets.2.key_as_string: "4|9223372036854775814" }
+  - match: { aggregations.m_terms.buckets.2.doc_count: 1 }
+
+---
+"addr and val":
+  - do:
+      search:
+        size: 0
+        index: "test1"
+        body:
+          aggs:
+            m_terms:
+              multi_terms:
+                terms:
+                  - field: addr
+                  - field: val
+
+  - length: { aggregations.m_terms.buckets: 3 }
+  - match: { aggregations.m_terms.buckets.0.key_as_string: "192.168.0.4|4" }
+  - match: { aggregations.m_terms.buckets.0.doc_count: 2 }
+  - match: { aggregations.m_terms.buckets.1.key_as_string: "192.168.0.5|4" }
+  - match: { aggregations.m_terms.buckets.1.doc_count: 1 }
+  - match: { aggregations.m_terms.buckets.2.key_as_string: "192.168.0.13|3" }
+  - match: { aggregations.m_terms.buckets.2.doc_count: 1 }
+
+---
+"addr and uval":
+  - do:
+      search:
+        size: 0
+        index: "test2"
+        body:
+          aggs:
+            m_terms:
+              multi_terms:
+                terms:
+                  - field: addr
+                  - field: uval
+
+  - length: { aggregations.m_terms.buckets: 4 }
+  - match: { aggregations.m_terms.buckets.0.key_as_string: "192.168.0.4|9223372036854775804" }
+  - match: { aggregations.m_terms.buckets.0.doc_count: 1 }
+  - match: { aggregations.m_terms.buckets.1.key_as_string: "192.168.0.4|9223372036854775805" }
+  - match: { aggregations.m_terms.buckets.1.doc_count: 1 }
+  - match: { aggregations.m_terms.buckets.2.key_as_string: "192.168.0.5|9223372036854775805" }
+  - match: { aggregations.m_terms.buckets.2.doc_count: 1 }
+  - match: { aggregations.m_terms.buckets.3.key_as_string: "192.168.0.13|9223372036854775803" }
+  - match: { aggregations.m_terms.buckets.3.doc_count: 1 }
+
+---
+"mix and match":
+  - do:
+      search:
+        size: 0
+        index: "test1,test2"
+        body:
+          aggs:
+            m_terms:
+              multi_terms:
+                terms:
+                  - field: val
+                  - field: uval
+                  - field: addr
+
+  - length: { aggregations.m_terms.buckets: 3 }
+  - match: { aggregations.m_terms.buckets.0.key_as_string: "4|9.223372036854776E18|192.168.0.4" }
+  - match: { aggregations.m_terms.buckets.0.doc_count: 4 }
+  - match: { aggregations.m_terms.buckets.1.key_as_string: "3|9.223372036854776E18|192.168.0.13" }
+  - match: { aggregations.m_terms.buckets.1.doc_count: 2 }
+  - match: { aggregations.m_terms.buckets.2.key_as_string: "4|9.223372036854776E18|192.168.0.5" }
+  - match: { aggregations.m_terms.buckets.2.doc_count: 2 }
+
+---
+"aggs with children":
+  - do:
+      search:
+        size: 0
+        index: "test_with_child"
+        body:
+          aggs:
+            m_terms:
+              multi_terms:
+                terms:
+                  - field: val
+                  - field: lval
+              aggs:
+                ips:
+                  children:
+                    type: address
+
+  - length: { aggregations.m_terms.buckets: 2 }
+  - match: { aggregations.m_terms.buckets.0.key_as_string: "3|4" }
+  - match: { aggregations.m_terms.buckets.0.doc_count: 2 }
+  - match: { aggregations.m_terms.buckets.0.ips.doc_count: 4 }
+  - match: { aggregations.m_terms.buckets.1.key_as_string: "4|4" }
+  - match: { aggregations.m_terms.buckets.1.doc_count: 1 }
+  - match: { aggregations.m_terms.buckets.1.ips.doc_count: 2 }

+ 24 - 0
x-pack/plugin/src/test/resources/rest-api-spec/test/analytics/usage.yml

@@ -29,6 +29,7 @@ setup:
   - set: {analytics.stats.moving_percentiles_usage: moving_percentiles_usage}
   - set: { analytics.stats.normalize_usage: normalize_usage }
   - set: { analytics.stats.rate_usage: rate_usage }
+  - set: { analytics.stats.multi_terms_usage: multi_terms_usage }
 
   # use boxplot agg
   - do:
@@ -56,6 +57,7 @@ setup:
   - match: {analytics.stats.moving_percentiles_usage: $moving_percentiles_usage}
   - match: { analytics.stats.normalize_usage: $normalize_usage }
   - match: { analytics.stats.rate_usage: $rate_usage }
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
 
   # use top_metrics agg
   - do:
@@ -86,6 +88,7 @@ setup:
   - match: {analytics.stats.moving_percentiles_usage: $moving_percentiles_usage}
   - match: { analytics.stats.normalize_usage: $normalize_usage }
   - match: { analytics.stats.rate_usage: $rate_usage }
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
 
   # use cumulative_cardinality agg
   - do:
@@ -120,6 +123,7 @@ setup:
   - match: {analytics.stats.moving_percentiles_usage: $moving_percentiles_usage}
   - match: { analytics.stats.normalize_usage: $normalize_usage }
   - match: { analytics.stats.rate_usage: $rate_usage }
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
 
   # use t-test agg
   - do:
@@ -148,6 +152,7 @@ setup:
   - match: {analytics.stats.moving_percentiles_usage: $moving_percentiles_usage}
   - match: { analytics.stats.normalize_usage: $normalize_usage }
   - match: { analytics.stats.rate_usage: $rate_usage }
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
 
   - do:
       search:
@@ -172,6 +177,7 @@ setup:
   - match: {analytics.stats.moving_percentiles_usage: $moving_percentiles_usage}
   - match: { analytics.stats.normalize_usage: $normalize_usage }
   - match: { analytics.stats.rate_usage: $rate_usage }
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
 
   # use moving_percentile agg
   - do:
@@ -207,6 +213,7 @@ setup:
   - set: {analytics.stats.moving_percentiles_usage: moving_percentiles_usage}
   - match: { analytics.stats.normalize_usage: $normalize_usage }
   - match: { analytics.stats.rate_usage: $rate_usage }
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
 
   # use normalize agg
   - do:
@@ -242,6 +249,7 @@ setup:
   - gt: { analytics.stats.normalize_usage: $normalize_usage }
   - set: {analytics.stats.normalize_usage: normalize_usage}
   - match: { analytics.stats.rate_usage: $rate_usage }
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
 
   # use rate agg
   - do:
@@ -273,3 +281,19 @@ setup:
   - match: {analytics.stats.moving_percentiles_usage: $moving_percentiles_usage}
   - gt: { analytics.stats.rate_usage: $rate_usage }
   - set: {analytics.stats.rate_usage: rate_usage}
+  - match: { analytics.stats.multi_terms_usage: $multi_terms_usage }
+
+  # use multi_terms agg
+  - do:
+      search:
+        size: 0
+        index: "test"
+        body:
+          aggs:
+            m_terms:
+              multi_terms:
+                terms:
+                  - field: timestamp
+                  - field: str.keyword
+
+  - length: { aggregations.m_terms.buckets: 2 }

+ 2 - 1
x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/TransformAggregations.java

@@ -79,7 +79,8 @@ public final class TransformAggregations {
         "top_metrics", // https://github.com/elastic/elasticsearch/issues/52236
         "t_test", // https://github.com/elastic/elasticsearch/issues/54503,
         "variable_width_histogram", // https://github.com/elastic/elasticsearch/issues/58140
-        "rate" // https://github.com/elastic/elasticsearch/issues/61351
+        "rate", // https://github.com/elastic/elasticsearch/issues/61351
+        "multi_terms" // https://github.com/elastic/elasticsearch/issues/67609
     );
 
     private TransformAggregations() {}