
Merge pull request ESQL-1047 from elastic/main

🤖 ESQL: Merge upstream
elasticsearchmachine committed 2 years ago
commit b83c8a0787
86 changed files with 2412 additions and 822 deletions
  1. 5 0
      docs/changelog/95405.yaml
  2. 5 0
      docs/changelog/95440.yaml
  3. 275 0
      docs/reference/ccr/bi-directional-disaster-recovery.asciidoc
  4. BIN
      docs/reference/ccr/images/ccr-bi-directional-disaster-recovery.png
  5. BIN
      docs/reference/ccr/images/ccr-uni-directional-disaster-recovery.png
  6. 2 0
      docs/reference/ccr/index.asciidoc
  7. 194 0
      docs/reference/ccr/uni-directional-disaster-recovery.asciidoc
  8. 131 5
      modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/composite.yml
  9. 1 1
      modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java
  10. 1 2
      server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
  11. 34 5
      server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java
  12. 5 0
      server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
  13. 3 3
      server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java
  14. 1 1
      server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java
  15. 1 3
      server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
  16. 4 3
      server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java
  17. 1 1
      server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java
  18. 4 4
      server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java
  19. 0 169
      server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java
  20. 87 37
      server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
  21. 55 50
      server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorFactory.java
  22. 2 0
      server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
  23. 2 0
      server/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
  24. 78 3
      server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java
  25. 65 0
      server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
  26. 3 3
      server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java
  27. 15 3
      server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java
  28. 5 5
      server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
  29. 4 4
      server/src/test/java/org/elasticsearch/search/query/TopDocsCollectorFactoryTests.java
  30. 6 2
      test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java
  31. 12 4
      x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
  32. 7 4
      x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java
  33. 4 4
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java
  34. 80 32
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsAction.java
  35. 5 1
      x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-default_policy.json
  36. 6 0
      x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-final_pipeline.json
  37. 10 75
      x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-mappings.json
  38. 5 1
      x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-settings.json
  39. 3 1
      x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-template.json
  40. 6 6
      x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsageTests.java
  41. 5 4
      x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java
  42. 17 3
      x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventEmitter.java
  43. 39 15
      x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventIngestConfig.java
  44. 3 5
      x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/ingest/BulkProcessorFactory.java
  45. 7 3
      x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventEmitterTests.java
  46. 35 18
      x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventIngestConfigTests.java
  47. 7 3
      x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/ingest/BulkProcessorFactoryTests.java
  48. 68 6
      x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java
  49. 24 1
      x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java
  50. 2 1
      x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportGetTrainedModelPackageConfigAction.java
  51. 0 2
      x-pack/plugin/ml-package-loader/src/main/plugin-metadata/plugin-security.policy
  52. 56 0
      x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoaderTests.java
  53. 60 0
      x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtilsTests.java
  54. 4 1
      x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlDailyMaintenanceServiceIT.java
  55. 141 6
      x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MultipleDeploymentsIT.java
  56. 19 9
      x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java
  57. 2 0
      x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java
  58. 15 0
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/DefaultMachineLearningExtension.java
  59. 245 196
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
  60. 6 0
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java
  61. 75 33
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java
  62. 8 2
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java
  63. 1 1
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java
  64. 1 0
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java
  65. 147 37
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java
  66. 11 0
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java
  67. 19 0
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
  68. 3 3
      x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractor.java
  69. 148 0
      x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java
  70. 27 1
      x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java
  71. 8 2
      x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java
  72. 36 1
      x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java
  73. 1 4
      x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractorTests.java
  74. 4 4
      x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java
  75. 8 8
      x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java
  76. 2 2
      x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
  77. 2 2
      x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java
  78. 2 2
      x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java
  79. 2 2
      x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java
  80. 1 1
      x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java
  81. 4 1
      x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java
  82. 5 4
      x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java
  83. 3 3
      x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
  84. 3 3
      x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java
  85. 8 0
      x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml
  86. 1 1
      x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeCoordinatorTests.java

+ 5 - 0
docs/changelog/95405.yaml

@@ -0,0 +1,5 @@
+pr: 95405
+summary: Behavioral Analytics event ingest tuning
+area: Application
+type: enhancement
+issues: []

+ 5 - 0
docs/changelog/95440.yaml

@@ -0,0 +1,5 @@
+pr: 95440
+summary: "[ML] Get trained model stats by deployment id or model id"
+area: Machine Learning
+type: enhancement
+issues: []

+ 275 - 0
docs/reference/ccr/bi-directional-disaster-recovery.asciidoc

@@ -0,0 +1,275 @@
+[role="xpack"]
+[[ccr-disaster-recovery-bi-directional-tutorial]]
+=== Tutorial: Disaster recovery based on bi-directional {ccr}
+++++
+<titleabbrev>Bi-directional disaster recovery</titleabbrev>
+++++
+
+////
+[source,console]
+----
+PUT _data_stream/logs-generic-default
+----
+// TESTSETUP 
+
+[source,console]
+----
+DELETE /_data_stream/*
+----
+// TEARDOWN
+////
+
+Learn how to set up disaster recovery between two clusters based on
+bi-directional {ccr}. This tutorial is designed for data streams that support
+<<update-docs-in-a-data-stream-by-query,update by query>> and <<delete-docs-in-a-data-stream-by-query,delete by query>>. You can only perform these actions on the leader index.
+
+This tutorial uses {ls} as the source of ingestion. It takes advantage of a {ls} feature that allows {logstash-ref}/plugins-outputs-elasticsearch.html[the {ls} output to {es}] to be load balanced across an array of specified hosts. {beats} and {agents} currently do not
+support multiple outputs. It is also possible to set up a proxy
+(load balancer) in place of {ls} to redirect ingest traffic.
+
+This tutorial covers:
+
+* Setting up a remote cluster on `clusterA` and `clusterB`.
+* Setting up bi-directional cross-cluster replication with exclusion patterns.
+* Setting up {ls} with multiple hosts to allow automatic load balancing and switching during disasters.
+
+image::images/ccr-bi-directional-disaster-recovery.png[Bi-directional cross cluster replication failover and failback]
+
+[[ccr-tutorial-initial-setup]]
+==== Initial setup
+. Set up a remote cluster on both clusters.
++
+[source,console]
+----
+### On cluster A ###
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster": {
+      "remote": {
+        "clusterB": {
+          "mode": "proxy",
+          "skip_unavailable": true,
+          "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",
+          "proxy_socket_connections": 18,
+          "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400"
+        }
+      }
+    }
+  }
+}
+### On cluster B ###
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster": {
+      "remote": {
+        "clusterA": {
+          "mode": "proxy",
+          "skip_unavailable": true,
+          "server_name": "clustera.es.region-a.gcp.elastic-cloud.com",
+          "proxy_socket_connections": 18,
+          "proxy_address": "clustera.es.region-a.gcp.elastic-cloud.com:9400"
+        }
+      }
+    }
+  }
+}
+----
+// TEST[setup:host]
+// TEST[s/"server_name": "clustera.es.region-a.gcp.elastic-cloud.com",//]
+// TEST[s/"server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",//]
+// TEST[s/"proxy_socket_connections": 18,//]
+// TEST[s/clustera.es.region-a.gcp.elastic-cloud.com:9400/\${transport_host}/]
+// TEST[s/clusterb.es.region-b.gcp.elastic-cloud.com:9400/\${transport_host}/]
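++
+You can optionally confirm on each cluster that the remote connection is registered:
++
+[source,console]
+----
+GET _cluster/remote/info
+----
+// TEST[skip:verification only]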
+
+. Set up bi-directional cross-cluster replication.
++
+[source,console]
+----
+### On cluster A ###
+PUT /_ccr/auto_follow/logs-generic-default
+{
+  "remote_cluster": "clusterB",
+  "leader_index_patterns": [
+    ".ds-logs-generic-default-20*"
+  ],
+  "leader_index_exclusion_patterns":"{{leader_index}}-replicated_from_clustera",
+  "follow_index_pattern": "{{leader_index}}-replicated_from_clusterb"
+}
+
+### On cluster B ###
+PUT /_ccr/auto_follow/logs-generic-default
+{
+  "remote_cluster": "clusterA",
+  "leader_index_patterns": [
+    ".ds-logs-generic-default-20*"
+  ],
+  "leader_index_exclusion_patterns":"{{leader_index}}-replicated_from_clusterb",
+  "follow_index_pattern": "{{leader_index}}-replicated_from_clustera"
+}
+----
+// TEST[setup:remote_cluster]
+// TEST[s/clusterA/remote_cluster/]
+// TEST[s/clusterB/remote_cluster/]
++
+IMPORTANT: Existing data on the cluster will not be replicated by
+`_ccr/auto_follow` even if the patterns match. Auto-follow only
+replicates newly created backing indices (as part of the data stream).
++
+IMPORTANT: Use `leader_index_exclusion_patterns` to avoid recursion.
++
+TIP: `follow_index_pattern` allows lowercase characters only.
++
+TIP: This step cannot be executed via the {kib} UI due to the lack of an exclusion
+pattern in the UI. Use the API in this step.
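++
+You can optionally confirm the auto-follow pattern on each cluster:
++
+[source,console]
+----
+GET /_ccr/auto_follow/logs-generic-default
+----
+// TEST[skip:verification only]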
+
+. Set up the {ls} configuration file.
++
+This example uses the input generator to demonstrate the document
+count in the clusters. Reconfigure this section
+to suit your own use case. 
++
+[source,logstash]
+----
+### On Logstash server ###
+### This is a logstash config file ###
+input {
+  generator{
+    message => 'Hello World'
+    count => 100
+  }
+}
+output {
+  elasticsearch {
+    hosts => ["https://clustera.es.region-a.gcp.elastic-cloud.com:9243","https://clusterb.es.region-b.gcp.elastic-cloud.com:9243"]
+    user => "logstash-user"
+    password => "same_password_for_both_clusters"
+  }
+}
+----
++
+IMPORTANT: The key point is that when `cluster A` is down, all traffic will be
+automatically redirected to `cluster B`. Once `cluster A` comes back, traffic
+is automatically redirected back to `cluster A` again. This is achieved by the
+`hosts` option, where multiple {es} cluster endpoints are specified in the
+array `[clusterA, clusterB]`.
++
+TIP: Set up the same password for the same user on both clusters to use this load-balancing feature.
+
+. Start {ls} with the earlier configuration file.
++
+[source,sh]
+----
+### On Logstash server ###
+bin/logstash -f multiple_hosts.conf
+----
+
+. Observe document counts in data streams.
++
+The setup creates a data stream named `logs-generic-default` on each of the clusters. {ls} will write 50% of the documents to `cluster A` and 50% of the documents to `cluster B` when both clusters are up.
++
+Bi-directional {ccr} will create one more data stream on each of the clusters
+with the `-replicated_from_cluster{a|b}` suffix. At the end of this step:
++
+* data streams on cluster A contain:
+** 50 documents in `logs-generic-default-replicated_from_clusterb` 
+** 50 documents in `logs-generic-default`
+* data streams on cluster B contain:
+** 50 documents in `logs-generic-default-replicated_from_clustera`
+** 50 documents in `logs-generic-default`
+
+. Set up queries to search across both data streams.
+A query on `logs*`, on either of the clusters, returns 100
+hits in total.
++
+[source,console]
+----
+GET logs*/_search?size=0
+----
+
+
+==== Failover when `clusterA` is down
+. You can simulate this by shutting down either of the clusters. Let's shut down
+`cluster A` in this tutorial.
+. Start {ls} with the same configuration file. (This step is not required in real
+use cases where {ls} ingests continuously.)
++
+[source,sh]
+----
+### On Logstash server ###
+bin/logstash -f multiple_hosts.conf
+----
+
+. Observe that all {ls} traffic is automatically redirected to `cluster B`.
++
+TIP: You should also redirect all search traffic to `cluster B` during this time.
+
+. The two data streams on `cluster B` now contain a different number of documents, as shown below.
++
+* data streams on cluster A (down) 
+** 50 documents in `logs-generic-default-replicated_from_clusterb` 
+** 50 documents in `logs-generic-default`
+* data streams on cluster B (up)
+** 50 documents in `logs-generic-default-replicated_from_clustera`
+** 150 documents in `logs-generic-default`
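++
+One way to check these counts is the count API on `cluster B`:
++
+[source,console]
+----
+### On cluster B ###
+GET logs-generic-default/_count
+----
+// TEST[skip:verification only]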
+
+
+==== Failback when `clusterA` comes back
+. You can simulate this by turning `cluster A` back on. 
+. Data ingested to `cluster B` during `cluster A`'s downtime is
+automatically replicated; see the stats example below.
++
+* data streams on cluster A
+** 150 documents in `logs-generic-default-replicated_from_clusterb` 
+** 50 documents in `logs-generic-default`
+* data streams on cluster B
+** 50 documents in `logs-generic-default-replicated_from_clustera`
+** 150 documents in `logs-generic-default`
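++
+You can watch replication catch up using the {ccr} stats API on either cluster:
++
+[source,console]
+----
+### On either cluster ###
+GET /_ccr/stats
+----
+// TEST[skip:verification only]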
+
+. If you have {ls} running at this time, you will also observe that traffic is
+sent to both clusters.
+
+==== Perform update or delete by query
+It is possible to update or delete documents, but you can only perform these actions on the leader index.
+
+. First identify which backing index contains the document you want to update.
++
+[source,console]
+----
+### On either of the clusters ###
+GET logs-generic-default*/_search?filter_path=hits.hits._index
+{
+"query": {
+    "match": {
+      "event.sequence": "97"
+    }
+  }
+}
+----
++
+* If the search returns `"_index": ".ds-logs-generic-default-replicated_from_clustera-<yyyy.MM.dd>-*"`, proceed to the next step on `cluster A`.
+* If the search returns `"_index": ".ds-logs-generic-default-replicated_from_clusterb-<yyyy.MM.dd>-*"`, proceed to the next step on `cluster B`.
+* If the search returns `"_index": ".ds-logs-generic-default-<yyyy.MM.dd>-*"`, proceed to the next step on the same cluster where you ran the search query.
+
+. Perform the update (or delete) by query:
++
+[source,console]
+----
+### On the cluster identified from the previous step ###
+POST logs-generic-default/_update_by_query
+{
+  "query": {
+    "match": {
+      "event.sequence": "97"
+    }
+  },
+  "script": {
+    "source": "ctx._source.event.original = params.new_event",
+    "lang": "painless",
+    "params": {
+      "new_event": "FOOBAR"
+    }
+  }
+}
+----
++ 
+TIP: If a soft delete is merged away before it can be replicated to a follower, this process will fail due to incomplete history on the leader. See <<ccr-index-soft-deletes-retention-period, index.soft_deletes.retention_lease.period>> for more details.
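+
+As a sketch of one possible mitigation (assuming the default `12h` lease is too short for your expected outage window), you could lengthen the retention lease period on the leader's backing indices with a dynamic settings update:
+
+[source,console]
+----
+PUT logs-generic-default/_settings
+{
+  "index.soft_deletes.retention_lease.period": "1d"
+}
+----
+// TEST[skip:illustration only]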

BIN
docs/reference/ccr/images/ccr-bi-directional-disaster-recovery.png


BIN
docs/reference/ccr/images/ccr-uni-directional-disaster-recovery.png


+ 2 - 0
docs/reference/ccr/index.asciidoc

@@ -343,3 +343,5 @@ include::getting-started.asciidoc[]
 include::managing.asciidoc[]
 include::auto-follow.asciidoc[]
 include::upgrading.asciidoc[]
+include::uni-directional-disaster-recovery.asciidoc[]
+include::bi-directional-disaster-recovery.asciidoc[]

+ 194 - 0
docs/reference/ccr/uni-directional-disaster-recovery.asciidoc

@@ -0,0 +1,194 @@
+[role="xpack"]
+[[ccr-disaster-recovery-uni-directional-tutorial]]
+=== Tutorial: Disaster recovery based on uni-directional {ccr}
+++++
+<titleabbrev>Uni-directional disaster recovery</titleabbrev>
+++++
+
+////
+[source,console]
+----
+PUT kibana_sample_data_ecommerce
+----
+// TESTSETUP 
+
+[source,console]
+----
+DELETE kibana_sample_data_ecommerce
+----
+// TEARDOWN
+////
+
+
+Learn how to fail over and fail back between two clusters based on uni-directional {ccr}. You can also visit <<ccr-disaster-recovery-bi-directional-tutorial>> to set up replicating data streams that automatically fail over and fail back without human intervention.
+
+* Setting up uni-directional {ccr} replicated from `clusterA`
+to `clusterB`.
+* Failover - If `clusterA` goes offline, `clusterB` needs to "promote" follower
+indices to regular indices to allow write operations. All ingestion needs to
+be redirected to `clusterB`; this is controlled by the clients ({ls}, {beats},
+{agents}, etc.).
+* Failback - When `clusterA` is back online, it assumes the role of a follower
+and replicates the leader indices from `clusterB`.
+
+image::images/ccr-uni-directional-disaster-recovery.png[Uni-directional cross cluster replication failover and failback]
+
+NOTE: {ccr-cap} provides functionality to replicate user-generated indices only.
+{ccr-cap} isn't designed for replicating system-generated indices or snapshot
+settings, and can't replicate {ilm-init} or {slm-init} policies across clusters.
+Learn more in {ccr} <<ccr-limitations,limitations>>.
+
+==== Prerequisites
+Before completing this tutorial,
+<<ccr-getting-started-tutorial,set up cross-cluster replication>> to connect two 
+clusters and configure a follower index. 
+
+In this tutorial, `kibana_sample_data_ecommerce` is replicated from `clusterA` to `clusterB`.
+
+[source,console]
+----
+### On clusterB ###
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster": {
+      "remote": {
+        "clusterA": {
+          "mode": "proxy",
+          "skip_unavailable": "true",
+          "server_name": "clustera.es.region-a.gcp.elastic-cloud.com",
+          "proxy_socket_connections": "18",
+          "proxy_address": "clustera.es.region-a.gcp.elastic-cloud.com:9400"
+        }
+      }
+    }
+  }
+}
+----
+// TEST[setup:host]
+// TEST[s/"server_name": "clustera.es.region-a.gcp.elastic-cloud.com",//]
+// TEST[s/"proxy_socket_connections": 18,//]
+// TEST[s/clustera.es.region-a.gcp.elastic-cloud.com:9400/\${transport_host}/]
+// TEST[s/clusterA/remote_cluster/]
+
+[source,console]
+----
+### On clusterB ###
+PUT /kibana_sample_data_ecommerce2/_ccr/follow?wait_for_active_shards=1
+{
+  "remote_cluster": "clusterA",
+  "leader_index": "kibana_sample_data_ecommerce"
+}
+----
+// TEST[continued]
+// TEST[s/clusterA/remote_cluster/]
+
+IMPORTANT: Writes (such as ingestion or updates) should occur only on the leader
+index. Follower indices are read-only and will reject any writes.
+
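+For illustration, while `kibana_sample_data_ecommerce2` is still a follower,
+an indexing request like the following is expected to be rejected:
+
+[source,console]
+----
+### On clusterB ###
+POST /kibana_sample_data_ecommerce2/_doc/
+{
+  "user": "kimchy"
+}
+----
+// TEST[skip:fails while the index is a follower]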
+
+==== Failover when `clusterA` is down
+
+. Promote the follower indices in `clusterB` into regular indices so 
+that they accept writes. This can be achieved by:
+* First, pause replication for the follower index.
+* Next, close the follower index.
+* Unfollow the leader index.
+* Finally, open the follower index (which at this point is a regular index).
+
++
+[source,console]
+----
+### On clusterB ###
+POST /kibana_sample_data_ecommerce2/_ccr/pause_follow
+POST /kibana_sample_data_ecommerce2/_close           
+POST /kibana_sample_data_ecommerce2/_ccr/unfollow    
+POST /kibana_sample_data_ecommerce2/_open
+----
+// TEST[continued]
+
+. On the client side ({ls}, {beats}, {agent}), manually re-enable ingestion of
+`kibana_sample_data_ecommerce2` and redirect traffic to `clusterB`. You should
+also redirect all search traffic to `clusterB` during
+this time. You can simulate this by ingesting documents into the index; notice
+that the index is now writable.
++
+[source,console]
+----
+### On clusterB ###
+POST kibana_sample_data_ecommerce2/_doc/
+{
+  "user": "kimchy"
+}
+----
+// TEST[continued]
+
+==== Failback when `clusterA` comes back
+
+When `clusterA` comes back, `clusterB` becomes the new leader and `clusterA` becomes the follower. 
+
+. Set up remote cluster `clusterB` on `clusterA`.
++
+[source,console]
+----
+### On clusterA ###
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster": {
+      "remote": {
+        "clusterB": {
+          "mode": "proxy",
+          "skip_unavailable": "true",
+          "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",
+          "proxy_socket_connections": "18",
+          "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400"
+        }
+      }
+    }
+  }
+}
+----
+// TEST[setup:host]
+// TEST[s/"server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",//]
+// TEST[s/"proxy_socket_connections": 18,//]
+// TEST[s/clusterb.es.region-b.gcp.elastic-cloud.com:9400/\${transport_host}/]
+// TEST[s/clusterB/remote_cluster/]
+
+. Existing data needs to be discarded before you can turn any index into a
+follower. Ensure the most up-to-date data is available on `clusterB` prior to
+deleting any indices on `clusterA`.  
++
+[source,console]
+----
+### On clusterA ###
+DELETE kibana_sample_data_ecommerce
+----
+// TEST[skip:need dual cluster setup]
+
+
+. Create a follower index on `clusterA`, now following the leader index in
+`clusterB`.  
++
+[source,console]
+----
+### On clusterA ###
+PUT /kibana_sample_data_ecommerce/_ccr/follow?wait_for_active_shards=1
+{ 
+  "remote_cluster": "clusterB", 
+  "leader_index": "kibana_sample_data_ecommerce2" 
+}
+----
+// TEST[continued]
+// TEST[s/clusterB/remote_cluster/]
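++
+Optionally, check the status of the new follower index:
++
+[source,console]
+----
+### On clusterA ###
+GET /kibana_sample_data_ecommerce/_ccr/info
+----
+// TEST[skip:verification only]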
+
+. The index on the follower cluster now contains the updated documents.
++
+[source,console]
+----
+### On clusterA ###
+GET kibana_sample_data_ecommerce/_search?q=kimchy
+----
+// TEST[continued]
++ 
+TIP: If a soft delete is merged away before it can be replicated to a follower, this process will fail due to incomplete history on the leader. See <<ccr-index-soft-deletes-retention-period, index.soft_deletes.retention_lease.period>> for more details.

+ 131 - 5
modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/composite.yml

@@ -268,7 +268,7 @@ setup:
   - match: { aggregations.test.buckets.1.doc_count: 2 }
 
 ---
-"Nested Composite aggregation":
+"Composite aggregation with multiple terms sources":
   - do:
       search:
         rest_total_hits_as_int: true
@@ -311,6 +311,134 @@ setup:
   - match: { aggregations.test.buckets.4.key.long: 1000 }
   - match: { aggregations.test.buckets.4.key.kw: "bar" }
   - match: { aggregations.test.buckets.4.doc_count: 1 }
+---
+"Basic numeric histogram":
+  - skip:
+      version: " - 8.3.99"
+      reason:  Multivalue Handling changed in 8.4
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          aggregations:
+            test:
+              composite:
+                sources: [
+                  "histo": {
+                    "histogram": {
+                      "field": "long",
+                      "interval": 50
+                    }
+                  }
+                ]
+
+  - match: {hits.total: 6}
+  - length: { aggregations.test.buckets: 3 }
+  - match: { aggregations.test.buckets.0.key.histo: 0}
+  - match: { aggregations.test.buckets.0.doc_count: 3}
+  - match: { aggregations.test.buckets.1.key.histo: 100}
+  - match: { aggregations.test.buckets.1.doc_count: 1}
+  - match: { aggregations.test.buckets.2.key.histo: 1000}
+  - match: { aggregations.test.buckets.2.doc_count: 1}
+---
+"Basic numeric histogram legacy multivalue":
+  - skip:
+      version: " 8.4.0 - "
+      reason:  Multivalue Handling changed in 8.4
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          aggregations:
+            test:
+              composite:
+                sources: [
+                  "histo": {
+                    "histogram": {
+                      "field": "long",
+                      "interval": 50
+                    }
+                  }
+                ]
+
+  - match: {hits.total: 6}
+  - length: { aggregations.test.buckets: 3 }
+  - match: { aggregations.test.buckets.0.key.histo: 0}
+  - match: { aggregations.test.buckets.0.doc_count: 4}
+  - match: { aggregations.test.buckets.1.key.histo: 100}
+  - match: { aggregations.test.buckets.1.doc_count: 1}
+  - match: { aggregations.test.buckets.2.key.histo: 1000}
+  - match: { aggregations.test.buckets.2.doc_count: 1}
+---
+"Basic numeric histogram with missing bucket":
+  - skip:
+      version: " - 8.3.99"
+      reason:  Multivalue Handling changed in 8.4
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          aggregations:
+            test:
+              composite:
+                sources: [
+                  "histo": {
+                    "histogram": {
+                      "field": "long",
+                      "interval": 50,
+                      "missing_bucket": true
+                    }
+                  }
+                ]
+
+  - match: {hits.total: 6}
+  - length: { aggregations.test.buckets: 4 }
+  - match: { aggregations.test.buckets.0.key.histo: null}
+  - match: { aggregations.test.buckets.0.doc_count: 3}
+  - match: { aggregations.test.buckets.1.key.histo: 0}
+  - match: { aggregations.test.buckets.1.doc_count: 3}
+  - match: { aggregations.test.buckets.2.key.histo: 100}
+  - match: { aggregations.test.buckets.2.doc_count: 1}
+  - match: { aggregations.test.buckets.3.key.histo: 1000}
+  - match: { aggregations.test.buckets.3.doc_count: 1}
+---
+"Basic numeric histogram with missing bucket order desc":
+  - skip:
+      version: " - 8.3.99"
+      reason:  Multivalue Handling changed in 8.4
+  - do:
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          aggregations:
+            test:
+              composite:
+                sources: [
+                  "histo": {
+                    "histogram": {
+                      "field": "long",
+                      "interval": 50,
+                      "order": "desc",
+                      "missing_bucket": true
+                    }
+                  }
+                ]
+
+  - match: {hits.total: 6}
+  - length: { aggregations.test.buckets: 4 }
+  - match: { aggregations.test.buckets.3.key.histo: null}
+  - match: { aggregations.test.buckets.3.doc_count: 3}
+  - match: { aggregations.test.buckets.2.key.histo: 0}
+  - match: { aggregations.test.buckets.2.doc_count: 3}
+  - match: { aggregations.test.buckets.1.key.histo: 100}
+  - match: { aggregations.test.buckets.1.doc_count: 1}
+  - match: { aggregations.test.buckets.0.key.histo: 1000}
+  - match: { aggregations.test.buckets.0.doc_count: 1}
+
 
 ---
 "Aggregate After":
@@ -492,10 +620,8 @@ setup:
 ---
 "Composite aggregation with lossy format":
   - skip:
-      # version: " - 7.13.99"
-      # reason:  After key parse checking added in 7.14
-      version: "all"
-      reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/95386"
+      version: " - 7.13.99"
+      reason:  After key parse checking added in 7.14
 
   - do:
       catch: /created output it couldn't parse/

+ 1 - 1
modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java

@@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
 
 public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
-    private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("azure_use_fixture", "true"));
+    private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
 
     @ClassRule
     public static AzureHttpFixture fixture = new AzureHttpFixture(

+ 1 - 2
server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java

@@ -61,7 +61,6 @@ public class DynamicMappingIT extends ESIntegTestCase {
         return Collections.singleton(InternalSettingsPlugin.class);
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/95047")
     public void testConflictingDynamicMappings() {
         // we don't use indexRandom because the order of requests is important here
         createIndex("index");
@@ -72,7 +71,7 @@ public class DynamicMappingIT extends ESIntegTestCase {
         } catch (DocumentParsingException e) {
             // general case, the parsing code complains that it can't parse "bar" as a "long"
             assertThat(e.getMessage(), Matchers.containsString("failed to parse field [foo] of type [long]"));
-        } catch (MapperParsingException e) {
+        } catch (IllegalArgumentException e) {
             // rare case: the node that processes the index request doesn't have the mappings
             // yet and sends a mapping update to the master node to map "bar" as "text". This
             // fails as it had been already mapped as a long by the previous index request.

+ 34 - 5
server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java

@@ -546,7 +546,11 @@ public class Coordinator extends AbstractLifecycleComponent implements ClusterSt
 
             @Override
             public void onFailure(Exception e) {
-                logger.debug(Strings.format("election attempt for [%s] in term [%d] failed", candidateMasterNode, term), e);
+                logger.log(
+                    e instanceof CoordinationStateRejectedException ? Level.DEBUG : Level.WARN,
+                    Strings.format("election attempt for [%s] in term [%d] failed", candidateMasterNode, term),
+                    e
+                );
             }
         });
     }
@@ -909,7 +913,7 @@ public class Coordinator extends AbstractLifecycleComponent implements ClusterSt
                 @Override
                 public void onFailure(Exception e) {
                     // TODO tests for heartbeat failures
-                    logger.debug(() -> Strings.format("heartbeat failure in term [%s]", leaderTerm), e);
+                    logger.warn(() -> Strings.format("failed to write heartbeat for term [%s]", leaderTerm), e);
                     synchronized (mutex) {
                         if (getCurrentTerm() == leaderTerm) {
                             becomeCandidate("leaderHeartbeatService");
@@ -1946,7 +1950,15 @@ public class Coordinator extends AbstractLifecycleComponent implements ClusterSt
                     removePublicationAndPossiblyBecomeCandidate("Publication.onCompletion(false)");
                     cancelTimeoutHandlers();
 
-                    final FailedToCommitClusterStateException exception = new FailedToCommitClusterStateException("publication failed", e);
+                    final FailedToCommitClusterStateException exception = new FailedToCommitClusterStateException(
+                        Strings.format(
+                            "publication of cluster state version [%d] in term [%d] failed [committed={}]",
+                            publishRequest.getAcceptedState().version(),
+                            publishRequest.getAcceptedState().term(),
+                            committed
+                        ),
+                        e
+                    );
                     ackListener.onNodeAck(getLocalNode(), exception); // other nodes have acked, but not the master.
                     publishListener.onFailure(exception);
                 }
@@ -1974,15 +1986,32 @@ public class Coordinator extends AbstractLifecycleComponent implements ClusterSt
         }
 
         @Override
-        protected Optional<ListenableFuture<ApplyCommitRequest>> handlePublishResponse(
+        protected Optional<SubscribableListener<ApplyCommitRequest>> handlePublishResponse(
             DiscoveryNode sourceNode,
             PublishResponse publishResponse
         ) {
             assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
             assert getCurrentTerm() >= publishResponse.getTerm();
             return coordinationState.get().handlePublishResponse(sourceNode, publishResponse).map(applyCommitRequest -> {
-                final var future = new ListenableFuture<ApplyCommitRequest>();
+                final var future = new SubscribableListener<ApplyCommitRequest>();
                 beforeCommit(applyCommitRequest.getTerm(), applyCommitRequest.getVersion(), future.map(ignored -> applyCommitRequest));
+                future.addListener(new ActionListener<>() {
+                    @Override
+                    public void onResponse(ApplyCommitRequest applyCommitRequest) {}
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        logger.log(
+                            e instanceof CoordinationStateRejectedException ? Level.DEBUG : Level.WARN,
+                            Strings.format(
+                                "publication of cluster state version [%d] in term [%d] failed to commit after reaching quorum",
+                                publishRequest.getAcceptedState().version(),
+                                publishRequest.getAcceptedState().term()
+                            ),
+                            e
+                        );
+                    }
+                });
                 return future;
             });
         }

+ 5 - 0
server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java

@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.service.ClusterApplierService;
 import org.elasticsearch.cluster.service.MasterService;
 import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
 import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.breaker.CircuitBreakingException;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@@ -481,6 +482,10 @@ public class JoinHelper {
 
                     @Override
                     public void onFailure(Exception e) {
+                        logger.warn(
+                            Strings.format("failed to retrieve latest stored state after winning election in term [%d]", joiningTerm),
+                            e
+                        );
                         joinRequestAccumulator.values().forEach(joinCallback -> joinCallback.v2().onFailure(e));
                     }
                 }, joiningTerm);

+ 3 - 3
server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java

@@ -13,10 +13,10 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.SubscribableListener;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.coordination.ClusterStatePublisher.AckListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.util.concurrent.ListenableFuture;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportResponse;
@@ -38,7 +38,7 @@ public abstract class Publication {
     private final LongSupplier currentTimeSupplier;
     private final long startTime;
 
-    private Optional<ListenableFuture<ApplyCommitRequest>> applyCommitRequest; // set when state is committed
+    private Optional<SubscribableListener<ApplyCommitRequest>> applyCommitRequest; // set when state is committed
     private boolean isCompleted; // set when publication is completed
     private boolean cancelled; // set when publication is cancelled
 
@@ -174,7 +174,7 @@ public abstract class Publication {
 
     protected abstract boolean isPublishQuorum(CoordinationState.VoteCollection votes);
 
-    protected abstract Optional<ListenableFuture<ApplyCommitRequest>> handlePublishResponse(
+    protected abstract Optional<SubscribableListener<ApplyCommitRequest>> handlePublishResponse(
         DiscoveryNode sourceNode,
         PublishResponse publishResponse
     );

+ 1 - 1
server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java

@@ -109,7 +109,7 @@ public class StoreHeartbeatService implements LeaderHeartbeatService {
 
             @Override
             public void onFailure(Exception e) {
-                logger.trace("runIfNoRecentLeader: readLatestHeartbeat failed", e);
+                logger.warn("failed to read heartbeat from store", e);
             }
         });
     }

+ 1 - 3
server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java

@@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations;
 
 import org.apache.lucene.search.Collector;
 import org.elasticsearch.action.search.SearchShardTask;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.aggregations.support.TimeSeriesIndexSearcher;
 import org.elasticsearch.search.internal.SearchContext;
@@ -26,8 +25,7 @@ import java.util.List;
  */
 public class AggregationPhase {
 
-    @Inject
-    public AggregationPhase() {}
+    private AggregationPhase() {}
 
     public static void preProcess(SearchContext context) {
         if (context.aggregations() == null) {

+ 4 - 3
server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java

@@ -69,13 +69,12 @@ public class InternalComposite extends InternalMultiBucketAggregation<InternalCo
         this.reverseMuls = reverseMuls;
         this.missingOrders = missingOrders;
         this.earlyTerminated = earlyTerminated;
-        validateAfterKey();
     }
 
     /**
      * Checks that the afterKey formatting does not result in loss of information
      *
-     * Only called when a new InternalComposite() is built directly.  We can't validate afterKeys from
+     * Only called when a new InternalComposite() is built after a reduce.  We can't validate afterKeys from
      * InternalComposites built from a StreamInput because they may be coming from nodes that do not
      * do validation, and errors thrown during StreamInput deserialization can kill a node.  However,
      * InternalComposites that come from remote nodes will always be reduced on the co-ordinator, and
@@ -261,7 +260,7 @@ public class InternalComposite extends InternalMultiBucketAggregation<InternalCo
             lastKey = lastBucket.getRawKey();
         }
         reduceContext.consumeBucketsAndMaybeBreak(result.size());
-        return new InternalComposite(
+        InternalComposite reduced = new InternalComposite(
             name,
             size,
             sourceNames,
@@ -273,6 +272,8 @@ public class InternalComposite extends InternalMultiBucketAggregation<InternalCo
             earlyTerminated,
             metadata
         );
+        reduced.validateAfterKey();
+        return reduced;
     }
 
     @Override

+ 1 - 1
server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java

@@ -67,7 +67,7 @@ public class CollectorResult implements ToXContentObject, Writeable {
     /**
      * A list of children collectors "embedded" inside this collector
      */
-    private List<CollectorResult> children;
+    private final List<CollectorResult> children;
 
     public CollectorResult(String collectorName, String reason, long time, List<CollectorResult> children) {
         this.collectorName = collectorName;

+ 4 - 4
server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java

@@ -47,15 +47,15 @@ public class InternalProfileCollector implements Collector {
      */
     private final InternalProfileCollector[] children;
 
-    public InternalProfileCollector(Collector collector, String reason, InternalProfileCollector... children) {
+    public InternalProfileCollector(Collector collector, String reason, Collector... children) {
         this.collector = new ProfileCollector(collector);
         this.reason = reason;
         this.collectorName = deriveCollectorName(collector);
         Objects.requireNonNull(children, "children collectors cannot be null");
-        for (InternalProfileCollector child : children) {
-            Objects.requireNonNull(child, "child collector cannot be null");
+        this.children = new InternalProfileCollector[children.length];
+        for (int i = 0; i < children.length; i++) {
+            this.children[i] = (InternalProfileCollector) Objects.requireNonNull(children[i], "child collector cannot be null");
         }
-        this.children = children;
     }
 
     /**

+ 0 - 169
server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java

@@ -1,169 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-package org.elasticsearch.search.query;
-
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MultiCollector;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreMode;
-import org.apache.lucene.search.SimpleCollector;
-import org.apache.lucene.search.Weight;
-import org.elasticsearch.common.lucene.MinimumScoreCollector;
-import org.elasticsearch.common.lucene.search.FilteredCollector;
-import org.elasticsearch.search.profile.query.InternalProfileCollector;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_MIN_SCORE;
-import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_MULTI;
-import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_POST_FILTER;
-import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT;
-
-abstract class QueryCollectorContext {
-    private static final Collector EMPTY_COLLECTOR = new SimpleCollector() {
-        @Override
-        public void collect(int doc) {}
-
-        @Override
-        public ScoreMode scoreMode() {
-            return ScoreMode.COMPLETE_NO_SCORES;
-        }
-    };
-
-    private final String profilerName;
-
-    QueryCollectorContext(String profilerName) {
-        this.profilerName = profilerName;
-    }
-
-    /**
-     * Creates a collector that delegates documents to the provided <code>in</code> collector.
-     * @param in The delegate collector
-     */
-    abstract Collector create(Collector in) throws IOException;
-
-    /**
-     * Wraps this collector with a profiler
-     */
-    protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) throws IOException {
-        final Collector collector = create(in);
-        if (in == null) {
-            return new InternalProfileCollector(collector, profilerName);
-        }
-        return new InternalProfileCollector(collector, profilerName, in);
-    }
-
-    /**
-     * Post-process <code>result</code> after search execution.
-     *
-     * @param result The query search result to populate
-     */
-    void postProcess(QuerySearchResult result) throws IOException {}
-
-    /**
-     * Creates the collector tree from the provided <code>collectors</code>
-     * @param collectors Ordered list of collector context
-     */
-    static Collector createQueryCollector(List<QueryCollectorContext> collectors) throws IOException {
-        Collector collector = null;
-        for (QueryCollectorContext ctx : collectors) {
-            collector = ctx.create(collector);
-        }
-        return collector;
-    }
-
-    /**
-     * Creates the collector tree from the provided <code>collectors</code> and wraps each collector with a profiler
-     * @param collectors Ordered list of collector context
-     */
-    static InternalProfileCollector createQueryCollectorWithProfiler(List<QueryCollectorContext> collectors) throws IOException {
-        InternalProfileCollector collector = null;
-        for (QueryCollectorContext ctx : collectors) {
-            collector = ctx.createWithProfiler(collector);
-        }
-        return collector;
-    }
-
-    /**
-     * Filters documents with a query score greater than <code>minScore</code>
-     * @param minScore The minimum score filter
-     */
-    static QueryCollectorContext createMinScoreCollectorContext(float minScore) {
-        return new QueryCollectorContext(REASON_SEARCH_MIN_SCORE) {
-            @Override
-            Collector create(Collector in) {
-                return new MinimumScoreCollector(in, minScore);
-            }
-        };
-    }
-
-    /**
-     * Filters documents based on the provided <code>query</code>
-     */
-    static QueryCollectorContext createFilteredCollectorContext(IndexSearcher searcher, Query query) {
-        return new QueryCollectorContext(REASON_SEARCH_POST_FILTER) {
-            @Override
-            Collector create(Collector in) throws IOException {
-                final Weight filterWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
-                return new FilteredCollector(in, filterWeight);
-            }
-        };
-    }
-
-    /**
-     * Creates a multi collector from the provided sub-collector
-     */
-    static QueryCollectorContext createAggsCollectorContext(Collector subCollector) {
-        return new QueryCollectorContext(REASON_SEARCH_MULTI) {
-            @Override
-            Collector create(Collector in) {
-                List<Collector> subCollectors = new ArrayList<>();
-                subCollectors.add(in);
-                subCollectors.add(subCollector);
-                return MultiCollector.wrap(subCollectors);
-            }
-
-            @Override
-            protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) {
-                if (subCollector instanceof InternalProfileCollector == false) {
-                    throw new IllegalArgumentException("non-profiling collector");
-                }
-                final Collector collector = MultiCollector.wrap(in, subCollector);
-                return new InternalProfileCollector(collector, REASON_SEARCH_MULTI, in, (InternalProfileCollector) subCollector);
-            }
-        };
-    }
-
-    /**
-     * Creates collector limiting the collection to the first <code>numHits</code> documents
-     */
-    static QueryCollectorContext createEarlyTerminationCollectorContext(int numHits) {
-        return new QueryCollectorContext(REASON_SEARCH_TERMINATE_AFTER_COUNT) {
-            private Collector collector;
-
-            /**
-             * Creates a {@link MultiCollector} to ensure that the {@link EarlyTerminatingCollector}
-             * can terminate the collection independently of the provided <code>in</code> {@link Collector}.
-             */
-            @Override
-            Collector create(Collector in) {
-                assert collector == null;
-
-                List<Collector> subCollectors = new ArrayList<>();
-                subCollectors.add(new EarlyTerminatingCollector(EMPTY_COLLECTOR, numHits, true));
-                subCollectors.add(in);
-                this.collector = MultiCollector.wrap(subCollectors);
-                return collector;
-            }
-        };
-    }
-}

+ 87 - 37
server/src/main/java/org/elasticsearch/search/query/QueryPhase.java

@@ -16,12 +16,18 @@ import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.MultiCollector;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TotalHits;
+import org.apache.lucene.search.Weight;
 import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.MinimumScoreCollector;
+import org.elasticsearch.common.lucene.search.FilteredCollector;
 import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
 import org.elasticsearch.common.util.concurrent.EWMATrackingEsThreadPoolExecutor;
 import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
@@ -33,6 +39,7 @@ import org.elasticsearch.search.aggregations.AggregationPhase;
 import org.elasticsearch.search.internal.ContextIndexSearcher;
 import org.elasticsearch.search.internal.ScrollContext;
 import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.profile.query.InternalProfileCollector;
 import org.elasticsearch.search.rescore.RescorePhase;
 import org.elasticsearch.search.sort.SortAndFormats;
@@ -40,14 +47,13 @@ import org.elasticsearch.search.suggest.SuggestPhase;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
-import java.util.LinkedList;
 import java.util.concurrent.ExecutorService;
 
-import static org.elasticsearch.search.query.QueryCollectorContext.createAggsCollectorContext;
-import static org.elasticsearch.search.query.QueryCollectorContext.createEarlyTerminationCollectorContext;
-import static org.elasticsearch.search.query.QueryCollectorContext.createFilteredCollectorContext;
-import static org.elasticsearch.search.query.QueryCollectorContext.createMinScoreCollectorContext;
-import static org.elasticsearch.search.query.TopDocsCollectorContext.createTopDocsCollectorContext;
+import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_MIN_SCORE;
+import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_MULTI;
+import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_POST_FILTER;
+import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT;
+import static org.elasticsearch.search.query.TopDocsCollectorFactory.createTopDocsCollectorFactory;
 
 /**
  * Query phase of a search request, used to run the query and get back from each shard information about the matching documents
@@ -56,7 +62,7 @@ import static org.elasticsearch.search.query.TopDocsCollectorContext.createTopDo
 public class QueryPhase {
     private static final Logger LOGGER = LogManager.getLogger(QueryPhase.class);
 
-    public QueryPhase() {}
+    private QueryPhase() {}
 
     public static void execute(SearchContext searchContext) throws QueryPhaseExecutionException {
         if (searchContext.hasOnlySuggest()) {
@@ -125,30 +131,65 @@ public class QueryPhase {
                 }
             }
 
-            final LinkedList<QueryCollectorContext> collectors = new LinkedList<>();
-            // whether the chain contains a collector that filters documents
-            boolean hasFilterCollector = false;
+            // create the top docs collector last when the other collectors are known
+            final TopDocsCollectorFactory topDocsFactory = createTopDocsCollectorFactory(
+                searchContext,
+                searchContext.parsedPostFilter() != null || searchContext.minimumScore() != null
+            );
+
+            Collector collector = wrapWithProfilerCollectorIfNeeded(
+                searchContext.getProfilers(),
+                topDocsFactory.collector(),
+                topDocsFactory.profilerName
+            );
+
             if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER) {
                 // add terminate_after before the filter collectors
                 // it will only be applied on documents accepted by these filter collectors
-                collectors.add(createEarlyTerminationCollectorContext(searchContext.terminateAfter()));
+                EarlyTerminatingCollector earlyTerminatingCollector = new EarlyTerminatingCollector(
+                    EMPTY_COLLECTOR,
+                    searchContext.terminateAfter(),
+                    true
+                );
+                collector = wrapWithProfilerCollectorIfNeeded(
+                    searchContext.getProfilers(),
+                    MultiCollector.wrap(earlyTerminatingCollector, collector),
+                    REASON_SEARCH_TERMINATE_AFTER_COUNT,
+                    collector
+                );
             }
             if (searchContext.parsedPostFilter() != null) {
                 // add post filters before aggregations
                 // it will only be applied to top hits
-                collectors.add(createFilteredCollectorContext(searcher, searchContext.parsedPostFilter().query()));
-                // this collector can filter documents during the collection
-                hasFilterCollector = true;
+                final Weight filterWeight = searcher.createWeight(
+                    searcher.rewrite(searchContext.parsedPostFilter().query()),
+                    ScoreMode.COMPLETE_NO_SCORES,
+                    1f
+                );
+                collector = wrapWithProfilerCollectorIfNeeded(
+                    searchContext.getProfilers(),
+                    new FilteredCollector(collector, filterWeight),
+                    REASON_SEARCH_POST_FILTER,
+                    collector
+                );
             }
             if (searchContext.getAggsCollector() != null) {
-                // plug in additional collectors, like aggregations
-                collectors.add(createAggsCollectorContext(searchContext.getAggsCollector()));
+                collector = wrapWithProfilerCollectorIfNeeded(
+                    searchContext.getProfilers(),
+                    MultiCollector.wrap(collector, searchContext.getAggsCollector()),
+                    REASON_SEARCH_MULTI,
+                    collector,
+                    searchContext.getAggsCollector()
+                );
             }
             if (searchContext.minimumScore() != null) {
                 // apply the minimum score after multi collector so we filter aggs as well
-                collectors.add(createMinScoreCollectorContext(searchContext.minimumScore()));
-                // this collector can filter documents during the collection
-                hasFilterCollector = true;
+                collector = wrapWithProfilerCollectorIfNeeded(
+                    searchContext.getProfilers(),
+                    new MinimumScoreCollector(collector, searchContext.minimumScore()),
+                    REASON_SEARCH_MIN_SCORE,
+                    collector
+                );
             }
 
             boolean timeoutSet = scrollContext == null
@@ -171,7 +212,8 @@ public class QueryPhase {
             }
 
             try {
-                searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, timeoutSet);
+                searchWithCollector(searchContext, searcher, query, collector, timeoutSet);
+                queryResult.topDocs(topDocsFactory.topDocsAndMaxScore(), topDocsFactory.sortValueFormats);
                 ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH);
                 assert executor instanceof EWMATrackingEsThreadPoolExecutor
                     || (executor instanceof EsThreadPoolExecutor == false /* in case thread pool is mocked out in tests */)
@@ -192,30 +234,31 @@ public class QueryPhase {
         }
     }
 
+    private static Collector wrapWithProfilerCollectorIfNeeded(
+        Profilers profilers,
+        Collector collector,
+        String profilerName,
+        Collector... children
+    ) {
+        if (profilers == null) {
+            return collector;
+        }
+        return new InternalProfileCollector(collector, profilerName, children);
+    }
+
     private static void searchWithCollector(
         SearchContext searchContext,
         ContextIndexSearcher searcher,
         Query query,
-        LinkedList<QueryCollectorContext> collectors,
-        boolean hasFilterCollector,
+        Collector collector,
         boolean timeoutSet
     ) throws IOException {
-        // create the top docs collector last when the other collectors are known
-        final TopDocsCollectorContext topDocsFactory = createTopDocsCollectorContext(searchContext, hasFilterCollector);
-        // add the top docs collector, the first collector context in the chain
-        collectors.addFirst(topDocsFactory);
-
-        final Collector queryCollector;
         if (searchContext.getProfilers() != null) {
-            InternalProfileCollector profileCollector = QueryCollectorContext.createQueryCollectorWithProfiler(collectors);
-            searchContext.getProfilers().getCurrentQueryProfiler().setCollector(profileCollector);
-            queryCollector = profileCollector;
-        } else {
-            queryCollector = QueryCollectorContext.createQueryCollector(collectors);
+            searchContext.getProfilers().getCurrentQueryProfiler().setCollector((InternalProfileCollector) collector);
         }
         QuerySearchResult queryResult = searchContext.queryResult();
         try {
-            searcher.search(query, queryCollector);
+            searcher.search(query, collector);
         } catch (EarlyTerminatingCollector.EarlyTerminationException e) {
             queryResult.terminatedEarly(true);
         } catch (TimeExceededException e) {
@@ -229,9 +272,6 @@ public class QueryPhase {
         if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER && queryResult.terminatedEarly() == null) {
             queryResult.terminatedEarly(false);
         }
-        for (QueryCollectorContext ctx : collectors) {
-            ctx.postProcess(queryResult);
-        }
     }
 
     /**
@@ -253,4 +293,14 @@ public class QueryPhase {
     }
 
     public static class TimeExceededException extends RuntimeException {}
+
+    private static final Collector EMPTY_COLLECTOR = new SimpleCollector() {
+        @Override
+        public void collect(int doc) {}
+
+        @Override
+        public ScoreMode scoreMode() {
+            return ScoreMode.COMPLETE_NO_SCORES;
+        }
+    };
 }
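
For reference, the nesting order that replaces the old QueryCollectorContext chain can be sketched standalone. A minimal sketch in plain Java, assuming only Lucene plus the MinimumScoreCollector imported above; the terminate_after, post_filter and profiler wrappers follow the same inside-out pattern:

import org.apache.lucene.search.Collector;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.elasticsearch.common.lucene.MinimumScoreCollector;

class CollectorChainSketch {
    // Top docs collector at the core; aggs merged in via MultiCollector; min_score
    // wrapped last so it filters what both the top docs and aggs collectors see.
    static Collector buildChain(int numHits, Collector aggsCollector, Float minimumScore) {
        Collector collector = TopScoreDocCollector.create(numHits, Integer.MAX_VALUE);
        if (aggsCollector != null) {
            collector = MultiCollector.wrap(collector, aggsCollector);
        }
        if (minimumScore != null) {
            collector = new MinimumScoreCollector(collector, minimumScore);
        }
        return collector;
    }
}

Because each wrapper sees documents before the collector it wraps, putting MinimumScoreCollector outermost drops low-scoring documents before they reach either the top docs or the aggs collector, matching the comment in the diff.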

+ 55 - 50
server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java → server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorFactory.java

@@ -64,17 +64,30 @@ import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEAR
 import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_TOP_HITS;
 
 /**
- * A {@link QueryCollectorContext} that creates top docs collector
+ * Creates and holds the main {@link Collector} that will be used to search and collect top hits.
+ * Once the returned collector has been used to collect hits, the collected top docs can be enriched before being set on the search context.
  */
-abstract class TopDocsCollectorContext extends QueryCollectorContext {
-    protected final int numHits;
+abstract class TopDocsCollectorFactory {
+    final String profilerName;
+    final DocValueFormat[] sortValueFormats;
 
-    TopDocsCollectorContext(String profilerName, int numHits) {
-        super(profilerName);
-        this.numHits = numHits;
+    TopDocsCollectorFactory(String profilerName, DocValueFormat[] sortValueFormats) {
+        this.profilerName = profilerName;
+        this.sortValueFormats = sortValueFormats;
     }
 
-    static class EmptyTopDocsCollectorContext extends TopDocsCollectorContext {
+    /**
+     * Returns the collector used to collect top hits, created based on the incoming request options
+     */
+    abstract Collector collector();
+
+    /**
+     * Returns the collected top docs to be set on the {@link QuerySearchResult} within the search context.
+     * Must be called after collection; it enriches the top docs and wraps them in a {@link TopDocsAndMaxScore}.
+     */
+    abstract TopDocsAndMaxScore topDocsAndMaxScore() throws IOException;
+
+    static class EmptyTopDocsCollectorFactory extends TopDocsCollectorFactory {
         private final Sort sort;
         private final Collector collector;
         private final Supplier<TotalHits> hitCountSupplier;
@@ -84,8 +97,8 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
          * @param sortAndFormats The sort clause if provided
          * @param trackTotalHitsUpTo The threshold up to which total hit count needs to be tracked
          */
-        private EmptyTopDocsCollectorContext(@Nullable SortAndFormats sortAndFormats, int trackTotalHitsUpTo) {
-            super(REASON_SEARCH_COUNT, 0);
+        private EmptyTopDocsCollectorFactory(@Nullable SortAndFormats sortAndFormats, int trackTotalHitsUpTo) {
+            super(REASON_SEARCH_COUNT, null);
             this.sort = sortAndFormats == null ? null : sortAndFormats.sort;
             if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
                 this.collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 0, false);
@@ -108,13 +121,12 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
         }
 
         @Override
-        Collector create(Collector in) {
-            assert in == null;
+        Collector collector() {
             return collector;
         }
 
         @Override
-        void postProcess(QuerySearchResult result) {
+        TopDocsAndMaxScore topDocsAndMaxScore() {
             final TotalHits totalHitCount = hitCountSupplier.get();
             final TopDocs topDocs;
             if (sort != null) {
@@ -122,12 +134,11 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
             } else {
                 topDocs = new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS);
             }
-            result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), null);
+            return new TopDocsAndMaxScore(topDocs, Float.NaN);
         }
     }
 
-    static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext {
-        private final DocValueFormat[] sortFmt;
+    static class CollapsingTopDocsCollectorFactory extends TopDocsCollectorFactory {
         private final SinglePassGroupingCollector<?> topDocsCollector;
         private final Supplier<Float> maxScoreSupplier;
 
@@ -138,18 +149,17 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
          * @param numHits The number of collapsed top hits to retrieve.
          * @param trackMaxScore True if max score should be tracked
          */
-        private CollapsingTopDocsCollectorContext(
+        private CollapsingTopDocsCollectorFactory(
             CollapseContext collapseContext,
             @Nullable SortAndFormats sortAndFormats,
             int numHits,
             boolean trackMaxScore,
             @Nullable FieldDoc after
         ) {
-            super(REASON_SEARCH_TOP_HITS, numHits);
+            super(REASON_SEARCH_TOP_HITS, sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats);
             assert numHits > 0;
             assert collapseContext != null;
             Sort sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort;
-            this.sortFmt = sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats;
             this.topDocsCollector = collapseContext.createTopDocs(sort, numHits, after);
 
             MaxScoreCollector maxScoreCollector;
@@ -162,19 +172,18 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
         }
 
         @Override
-        Collector create(Collector in) throws IOException {
-            assert in == null;
+        Collector collector() {
             return topDocsCollector;
         }
 
         @Override
-        void postProcess(QuerySearchResult result) throws IOException {
+        TopDocsAndMaxScore topDocsAndMaxScore() throws IOException {
             TopFieldGroups topDocs = topDocsCollector.getTopGroups(0);
-            result.topDocs(new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get()), sortFmt);
+            return new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get());
         }
     }
 
-    static class SimpleTopDocsCollectorContext extends TopDocsCollectorContext {
+    static class SimpleTopDocsCollectorFactory extends TopDocsCollectorFactory {
 
         private static TopDocsCollector<?> createCollector(
             @Nullable SortAndFormats sortAndFormats,
@@ -189,7 +198,6 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
             }
         }
 
-        protected final @Nullable SortAndFormats sortAndFormats;
         private final Collector collector;
         private final Supplier<TotalHits> totalHitsSupplier;
         private final Supplier<TopDocs> topDocsSupplier;
@@ -197,16 +205,17 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
 
         /**
         * Constructor
-         * @param reader The index reader
-         * @param query The Lucene query
-         * @param sortAndFormats The sort clause if provided
-         * @param numHits The number of top hits to retrieve
-         * @param searchAfter The doc this request should "search after"
-         * @param trackMaxScore True if max score should be tracked
+         *
+         * @param reader             The index reader
+         * @param query              The Lucene query
+         * @param sortAndFormats     The sort clause if provided
+         * @param numHits            The number of top hits to retrieve
+         * @param searchAfter        The doc this request should "search after"
+         * @param trackMaxScore      True if max score should be tracked
          * @param trackTotalHitsUpTo Threshold up to which total hit count should be tracked
          * @param hasFilterCollector True if the collector chain contains at least one collector that can filter documents out
          */
-        private SimpleTopDocsCollectorContext(
+        private SimpleTopDocsCollectorFactory(
             IndexReader reader,
             Query query,
             @Nullable SortAndFormats sortAndFormats,
@@ -216,11 +225,9 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
             int trackTotalHitsUpTo,
             boolean hasFilterCollector
         ) throws IOException {
-            super(REASON_SEARCH_TOP_HITS, numHits);
-            this.sortAndFormats = sortAndFormats;
+            super(REASON_SEARCH_TOP_HITS, sortAndFormats == null ? null : sortAndFormats.formats);
 
             final TopDocsCollector<?> topDocsCollector;
-
             if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore(query)) {
                 // disable max score optimization since we have a mandatory clause
                 // that doesn't track the maximum score
@@ -268,8 +275,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
         }
 
         @Override
-        Collector create(Collector in) {
-            assert in == null;
+        Collector collector() {
             return collector;
         }
 
@@ -286,17 +292,16 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
         }
 
         @Override
-        void postProcess(QuerySearchResult result) throws IOException {
-            final TopDocsAndMaxScore topDocs = newTopDocs();
-            result.topDocs(topDocs, sortAndFormats == null ? null : sortAndFormats.formats);
+        TopDocsAndMaxScore topDocsAndMaxScore() {
+            return newTopDocs();
         }
     }
 
-    static class ScrollingTopDocsCollectorContext extends SimpleTopDocsCollectorContext {
+    static class ScrollingTopDocsCollectorFactory extends SimpleTopDocsCollectorFactory {
         private final ScrollContext scrollContext;
         private final int numberOfShards;
 
-        private ScrollingTopDocsCollectorContext(
+        private ScrollingTopDocsCollectorFactory(
             IndexReader reader,
             Query query,
             ScrollContext scrollContext,
@@ -322,7 +327,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
         }
 
         @Override
-        void postProcess(QuerySearchResult result) throws IOException {
+        TopDocsAndMaxScore topDocsAndMaxScore() {
             final TopDocsAndMaxScore topDocs = newTopDocs();
             if (scrollContext.totalHits == null) {
                 // first round
@@ -341,7 +346,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
                     scrollContext.lastEmittedDoc = topDocs.topDocs.scoreDocs[topDocs.topDocs.scoreDocs.length - 1];
                 }
             }
-            result.topDocs(topDocs, sortAndFormats == null ? null : sortAndFormats.formats);
+            return topDocs;
         }
     }
 
@@ -406,10 +411,10 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
     }
 
     /**
-     * Creates a {@link TopDocsCollectorContext} from the provided <code>searchContext</code>.
+     * Creates a {@link TopDocsCollectorFactory} from the provided <code>searchContext</code>.
     * @param hasFilterCollector True if the collector chain contains at least one collector that can filter documents out.
      */
-    static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searchContext, boolean hasFilterCollector)
+    static TopDocsCollectorFactory createTopDocsCollectorFactory(SearchContext searchContext, boolean hasFilterCollector)
         throws IOException {
         final IndexReader reader = searchContext.searcher().getIndexReader();
         final Query query = searchContext.rewrittenQuery();
@@ -417,7 +422,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
         final int totalNumDocs = Math.max(1, reader.numDocs());
         if (searchContext.size() == 0) {
             // no matter what the value of from is
-            return new EmptyTopDocsCollectorContext(searchContext.sort(), searchContext.trackTotalHitsUpTo());
+            return new EmptyTopDocsCollectorFactory(searchContext.sort(), searchContext.trackTotalHitsUpTo());
         } else if (searchContext.scrollContext() != null) {
             // we can disable the tracking of total hits after the initial scroll query
             // since the total hits is preserved in the scroll context.
@@ -426,7 +431,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
                 : SearchContext.TRACK_TOTAL_HITS_ACCURATE;
             // no matter what the value of from is
             int numDocs = Math.min(searchContext.size(), totalNumDocs);
-            return new ScrollingTopDocsCollectorContext(
+            return new ScrollingTopDocsCollectorFactory(
                 reader,
                 query,
                 searchContext.scrollContext(),
@@ -438,9 +443,9 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
                 hasFilterCollector
             );
         } else if (searchContext.collapse() != null) {
-            boolean trackScores = searchContext.sort() == null ? true : searchContext.trackScores();
+            boolean trackScores = searchContext.sort() == null || searchContext.trackScores();
             int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs);
-            return new CollapsingTopDocsCollectorContext(
+            return new CollapsingTopDocsCollectorFactory(
                 searchContext.collapse(),
                 searchContext.sort(),
                 numDocs,
@@ -456,7 +461,7 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
                     numDocs = Math.max(numDocs, rescoreContext.getWindowSize());
                 }
             }
-            return new SimpleTopDocsCollectorContext(
+            return new SimpleTopDocsCollectorFactory(
                 reader,
                 query,
                 searchContext.sort(),
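
The factory's two-step lifecycle, as QueryPhase.execute drives it above, in a minimal sketch; it assumes placement in org.elasticsearch.search.query, since the factory and its sortValueFormats field are package-private:

package org.elasticsearch.search.query;

import java.io.IOException;

import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.SearchContext;

class TopDocsFactoryLifecycleSketch {
    // 1. build the collector from the request options, 2. run the search,
    // 3. read back the enriched top docs and publish them on the query result.
    static void collectTopDocs(SearchContext searchContext, ContextIndexSearcher searcher, Query query) throws IOException {
        TopDocsCollectorFactory factory = TopDocsCollectorFactory.createTopDocsCollectorFactory(searchContext, false);
        Collector collector = factory.collector();
        searcher.search(query, collector);
        searchContext.queryResult().topDocs(factory.topDocsAndMaxScore(), factory.sortValueFormats);
    }
}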

+ 2 - 0
server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java

@@ -21,6 +21,8 @@ import java.io.IOException;
  */
 public class RescorePhase {
 
+    private RescorePhase() {}
+
     public static void execute(SearchContext context) {
         if (context.size() == 0 || context.collapse() != null || context.rescore() == null || context.rescore().isEmpty()) {
             return;

+ 2 - 0
server/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java

@@ -25,6 +25,8 @@ import java.util.Map;
  */
 public class SuggestPhase {
 
+    private SuggestPhase() {}
+
     public static void execute(SearchContext context) {
         final SuggestionSearchContext suggest = context.suggest();
         if (suggest == null) {

+ 78 - 3
server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java

@@ -8,6 +8,7 @@
 
 package org.elasticsearch.cluster.coordination;
 
+import org.apache.logging.log4j.Level;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -26,6 +27,7 @@ import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.gateway.ClusterStateUpdaters;
+import org.elasticsearch.test.MockLogAppender;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -120,8 +122,63 @@ public class AtomicRegisterCoordinatorTests extends CoordinatorTests {
         }
     }
 
+    @TestLogging(reason = "testing WARN logging", value = "org.elasticsearch.cluster.coordination:WARN")
+    public void testWarnLoggingOnRegisterFailures() {
+        try (Cluster cluster = new Cluster(1)) {
+            final var coordinatorStrategy = (AtomicRegisterCoordinatorStrategy) cluster.getCoordinatorStrategy();
+            cluster.runRandomly();
+            cluster.stabilise();
+            final var clusterNode = cluster.getAnyLeader();
+
+            final var mockAppender = new MockLogAppender();
+            try (var ignored = mockAppender.capturing(Coordinator.class, Coordinator.CoordinatorPublication.class)) {
+
+                clusterNode.disconnect();
+                mockAppender.addExpectation(
+                    new MockLogAppender.SeenEventExpectation(
+                        "write heartbeat failure",
+                        Coordinator.class.getCanonicalName(),
+                        Level.WARN,
+                        "failed to write heartbeat for term [" + clusterNode.coordinator.getCurrentTerm() + "]"
+                    )
+                );
+                cluster.runFor(HEARTBEAT_FREQUENCY.get(Settings.EMPTY).millis(), "warnings");
+                mockAppender.assertAllExpectationsMatched();
+                clusterNode.heal();
+
+                coordinatorStrategy.disruptElections = true;
+                mockAppender.addExpectation(
+                    new MockLogAppender.SeenEventExpectation(
+                        "acquire term failure",
+                        Coordinator.class.getCanonicalName(),
+                        Level.WARN,
+                        "election attempt for [*] in term [" + (clusterNode.coordinator.getCurrentTerm() + 1) + "] failed"
+                    )
+                );
+                cluster.runFor(DEFAULT_ELECTION_DELAY, "warnings");
+                mockAppender.assertAllExpectationsMatched();
+                coordinatorStrategy.disruptElections = false;
+
+                coordinatorStrategy.disruptPublications = true;
+                mockAppender.addExpectation(
+                    new MockLogAppender.SeenEventExpectation(
+                        "verify term failure",
+                        Coordinator.CoordinatorPublication.class.getCanonicalName(),
+                        Level.WARN,
+                        "publication of cluster state version [*] in term [*] failed to commit after reaching quorum"
+                    )
+                );
+                cluster.runFor(DEFAULT_ELECTION_DELAY + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "publication warnings");
+                mockAppender.assertAllExpectationsMatched();
+                coordinatorStrategy.disruptPublications = false;
+            }
+
+            cluster.stabilise();
+        }
+    }
+
     @Override
-    protected CoordinatorStrategy getCoordinatorStrategy() {
+    protected CoordinatorStrategy createCoordinatorStrategy() {
         return new AtomicRegisterCoordinatorStrategy();
     }
 
@@ -129,6 +186,8 @@ public class AtomicRegisterCoordinatorTests extends CoordinatorTests {
         private final AtomicLong currentTermRef = new AtomicLong();
         private final AtomicReference<Heartbeat> heartBeatRef = new AtomicReference<>();
         private final SharedStore sharedStore = new SharedStore();
+        private boolean disruptElections;
+        private boolean disruptPublications;
 
         @Override
         public CoordinationServices getCoordinationServices(
@@ -152,7 +211,7 @@ public class AtomicRegisterCoordinatorTests extends CoordinatorTests {
                 listener -> ActionListener.completeWith(listener, () -> OptionalLong.of(atomicRegister.readCurrentTerm()))
             );
             var reconfigurator = new SingleNodeReconfigurator(settings, clusterSettings);
-            var electionStrategy = new AtomicRegisterElectionStrategy(atomicRegister);
+            var electionStrategy = new AtomicRegisterElectionStrategy(atomicRegister, () -> disruptElections, () -> disruptPublications);
             return new CoordinationServices() {
                 @Override
                 public ElectionStrategy getElectionStrategy() {
@@ -208,9 +267,17 @@ public class AtomicRegisterCoordinatorTests extends CoordinatorTests {
 
     static class AtomicRegisterElectionStrategy extends ElectionStrategy {
         private final AtomicRegister register;
+        private final BooleanSupplier disruptElectionsSupplier;
+        private final BooleanSupplier disruptPublicationsSupplier;
 
-        AtomicRegisterElectionStrategy(AtomicRegister register) {
+        AtomicRegisterElectionStrategy(
+            AtomicRegister register,
+            BooleanSupplier disruptElectionsSupplier,
+            BooleanSupplier disruptPublicationsSupplier
+        ) {
             this.register = register;
+            this.disruptElectionsSupplier = disruptElectionsSupplier;
+            this.disruptPublicationsSupplier = disruptPublicationsSupplier;
         }
 
         @Override
@@ -258,6 +325,10 @@ public class AtomicRegisterCoordinatorTests extends CoordinatorTests {
         @Override
         public void onNewElection(DiscoveryNode localNode, long proposedTerm, ActionListener<StartJoinRequest> listener) {
             ActionListener.completeWith(listener, () -> {
+                if (disruptElectionsSupplier.getAsBoolean()) {
+                    throw new IOException("simulating failure to acquire term during election");
+                }
+
                 final var currentTerm = register.readCurrentTerm();
                 final var electionTerm = Math.max(proposedTerm, currentTerm + 1);
                 final var witness = register.compareAndExchange(currentTerm, electionTerm);
@@ -282,6 +353,10 @@ public class AtomicRegisterCoordinatorTests extends CoordinatorTests {
         public void beforeCommit(long term, long version, ActionListener<Void> listener) {
             // TODO: add a test to ensure that this gets called
             ActionListener.completeWith(listener, () -> {
+                if (disruptPublicationsSupplier.getAsBoolean()) {
+                    throw new IOException("simulating failure to verify term during publication");
+                }
+
                 final var currentTerm = register.readCurrentTerm();
                 if (currentTerm == term) {
                     return null;

+ 65 - 0
server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java

@@ -9,7 +9,10 @@ package org.elasticsearch.cluster.coordination;
 
 import org.apache.logging.log4j.Level;
 import org.elasticsearch.Build;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.TransportVersion;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.NotMasterException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -17,11 +20,14 @@ import org.elasticsearch.cluster.service.MasterService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.monitor.StatusInfo;
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.CapturingTransport;
 import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -291,6 +297,65 @@ public class JoinHelperTests extends ESTestCase {
         assertEquals(node1, capturedRequest1a.node());
     }
 
+    @TestLogging(reason = "testing WARN logging", value = "org.elasticsearch.cluster.coordination.JoinHelper:WARN")
+    public void testLatestStoredStateFailure() {
+        DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue();
+        CapturingTransport capturingTransport = new HandshakingCapturingTransport();
+        DiscoveryNode localNode = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT);
+        final var threadPool = deterministicTaskQueue.getThreadPool();
+        final var clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        final var taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of());
+        TransportService transportService = new TransportService(
+            Settings.EMPTY,
+            capturingTransport,
+            threadPool,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            x -> localNode,
+            clusterSettings,
+            new ClusterConnectionManager(Settings.EMPTY, capturingTransport, threadPool.getThreadContext()),
+            taskManager,
+            Tracer.NOOP
+        );
+        JoinHelper joinHelper = new JoinHelper(
+            null,
+            new MasterService(Settings.EMPTY, clusterSettings, threadPool, taskManger),
+            new NoOpClusterApplier(),
+            transportService,
+            () -> 1L,
+            (joinRequest, joinCallback) -> {
+                throw new AssertionError();
+            },
+            startJoinRequest -> { throw new AssertionError(); },
+            (s, p, r) -> {},
+            () -> new StatusInfo(HEALTHY, "info"),
+            new JoinReasonService(() -> 0L),
+            new NoneCircuitBreakerService(),
+            Function.identity(),
+            (listener, term) -> listener.onFailure(new ElasticsearchException("simulated"))
+        );
+
+        final var joinAccumulator = joinHelper.new CandidateJoinAccumulator();
+        final var joinListener = new PlainActionFuture<Void>();
+        joinAccumulator.handleJoinRequest(localNode, TransportVersion.CURRENT, joinListener);
+        assert joinListener.isDone() == false;
+
+        final var mockAppender = new MockLogAppender();
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "warning log",
+                JoinHelper.class.getCanonicalName(),
+                Level.WARN,
+                "failed to retrieve latest stored state after winning election in term [1]"
+            )
+        );
+        try (var ignored = mockAppender.capturing(JoinHelper.class)) {
+            joinAccumulator.close(Coordinator.Mode.LEADER);
+            mockAppender.assertAllExpectationsMatched();
+        }
+
+        assertEquals("simulated", expectThrows(ElasticsearchException.class, () -> FutureUtils.get(joinListener)).getMessage());
+    }
+
     private static class HandshakingCapturingTransport extends CapturingTransport {
 
         @Override

+ 3 - 3
server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java

@@ -10,13 +10,13 @@ package org.elasticsearch.cluster.coordination;
 
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.SubscribableListener;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodeRole;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.ListenableFuture;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
@@ -91,12 +91,12 @@ public class PublicationTests extends ESTestCase {
                 }
 
                 @Override
-                protected Optional<ListenableFuture<ApplyCommitRequest>> handlePublishResponse(
+                protected Optional<SubscribableListener<ApplyCommitRequest>> handlePublishResponse(
                     DiscoveryNode sourceNode,
                     PublishResponse publishResponse
                 ) {
                     return coordinationState.handlePublishResponse(sourceNode, publishResponse).map(applyCommitRequest -> {
-                        final var future = new ListenableFuture<ApplyCommitRequest>();
+                        final var future = new SubscribableListener<ApplyCommitRequest>();
                         future.onResponse(applyCommitRequest);
                         return future;
                     });
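
SubscribableListener is the replacement for ListenableFuture here. A minimal standalone sketch of the behaviour the test relies on: complete the listener once, and subscribers are notified whether they register before or after completion:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.SubscribableListener;

class SubscribableListenerSketch {
    static void demo() {
        var future = new SubscribableListener<String>();
        future.onResponse("apply-commit");                      // complete up front, as in the test
        future.addListener(ActionListener.wrap(
            response -> System.out.println("got " + response),  // runs immediately: already complete
            exception -> { throw new AssertionError(exception); }
        ));
    }
}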

+ 15 - 3
server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java

@@ -8,12 +8,14 @@
 
 package org.elasticsearch.cluster.coordination.stateless;
 
+import org.apache.logging.log4j.Level;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.MockLogAppender;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.junit.After;
 import org.junit.Before;
@@ -262,9 +264,19 @@ public class StoreHeartbeatServiceTests extends ESTestCase {
 
             failReadingHeartbeat.set(true);
 
-            AtomicBoolean noRecentLeaderFound = new AtomicBoolean();
-            heartbeatService.runIfNoRecentLeader(() -> noRecentLeaderFound.set(true));
-            assertThat(noRecentLeaderFound.get(), is(false));
+            final var mockAppender = new MockLogAppender();
+            mockAppender.addExpectation(
+                new MockLogAppender.SeenEventExpectation(
+                    "warning log",
+                    StoreHeartbeatService.class.getCanonicalName(),
+                    Level.WARN,
+                    "failed to read heartbeat from store"
+                )
+            );
+            try (var ignored = mockAppender.capturing(StoreHeartbeatService.class)) {
+                heartbeatService.runIfNoRecentLeader(() -> fail("should not be called"));
+                mockAppender.assertAllExpectationsMatched();
+            }
         }
     }
 

+ 5 - 5
server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java

@@ -83,7 +83,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.function.IntUnaryOperator;
 
-import static org.elasticsearch.search.query.TopDocsCollectorContext.hasInfMaxScore;
+import static org.elasticsearch.search.query.TopDocsCollectorFactory.hasInfMaxScore;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.equalTo;
@@ -679,16 +679,16 @@ public class QueryPhaseTests extends IndexShardTestCase {
         context.parsedQuery(new ParsedQuery(q));
         context.setSize(3);
         context.trackTotalHitsUpTo(3);
-        TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
-        assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE);
+        TopDocsCollectorFactory topDocsContext = TopDocsCollectorFactory.createTopDocsCollectorFactory(context, false);
+        assertEquals(topDocsContext.collector().scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE);
         QueryPhase.executeInternal(context);
         assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value);
         assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO);
         assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
 
         context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }));
-        topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
-        assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS);
+        topDocsContext = TopDocsCollectorFactory.createTopDocsCollectorFactory(context, false);
+        assertEquals(topDocsContext.collector().scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS);
         QueryPhase.executeInternal(context);
         assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value);
         assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));

+ 4 - 4
server/src/test/java/org/elasticsearch/search/query/TopDocsCollectorContextTests.java → server/src/test/java/org/elasticsearch/search/query/TopDocsCollectorFactoryTests.java

@@ -25,7 +25,7 @@ import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
 
-public class TopDocsCollectorContextTests extends ESTestCase {
+public class TopDocsCollectorFactoryTests extends ESTestCase {
 
     public void testShortcutTotalHitCountTextField() throws IOException {
         try (Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir)) {
@@ -39,7 +39,7 @@ public class TopDocsCollectorContextTests extends ESTestCase {
             iw.commit();
             try (IndexReader reader = iw.getReader()) {
                 final Query testQuery = new FieldExistsQuery("text");
-                int hitCount = TopDocsCollectorContext.shortcutTotalHitCount(reader, testQuery);
+                int hitCount = TopDocsCollectorFactory.shortcutTotalHitCount(reader, testQuery);
                 assertEquals(-1, hitCount);
             }
         }
@@ -59,7 +59,7 @@ public class TopDocsCollectorContextTests extends ESTestCase {
             iw.commit();
             try (IndexReader reader = iw.getReader()) {
                 final Query testQuery = new FieldExistsQuery("string");
-                int hitCount = TopDocsCollectorContext.shortcutTotalHitCount(reader, testQuery);
+                int hitCount = TopDocsCollectorFactory.shortcutTotalHitCount(reader, testQuery);
                 assertEquals(2, hitCount);
             }
         }
@@ -75,7 +75,7 @@ public class TopDocsCollectorContextTests extends ESTestCase {
             iw.commit();
             try (IndexReader reader = iw.getReader()) {
                 final Query testQuery = new FieldExistsQuery("int");
-                int hitCount = TopDocsCollectorContext.shortcutTotalHitCount(reader, testQuery);
+                int hitCount = TopDocsCollectorFactory.shortcutTotalHitCount(reader, testQuery);
                 assertEquals(1, hitCount);
             }
         }

+ 6 - 2
test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java

@@ -299,7 +299,7 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
             this.countingPageCacheRecycler = new CountingPageCacheRecycler();
             this.recycler = new BytesRefRecycler(countingPageCacheRecycler);
             deterministicTaskQueue.setExecutionDelayVariabilityMillis(DEFAULT_DELAY_VARIABILITY);
-            this.coordinatorStrategy = getCoordinatorStrategy();
+            this.coordinatorStrategy = createCoordinatorStrategy();
 
             assertThat(initialNodeCount, greaterThan(0));
 
@@ -874,6 +874,10 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
             blackholedConnections.clear();
         }
 
+        CoordinatorStrategy getCoordinatorStrategy() {
+            return coordinatorStrategy;
+        }
+
         @Override
         public void close() {
             // noinspection ReplaceInefficientStreamCount using .count() to run the filter on every node
@@ -1593,7 +1597,7 @@ public class AbstractCoordinatorTestCase extends ESTestCase {
         }
     }
 
-    protected CoordinatorStrategy getCoordinatorStrategy() {
+    protected CoordinatorStrategy createCoordinatorStrategy() {
         return new DefaultCoordinatorStrategy();
     }
 

+ 12 - 4
x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java

@@ -109,7 +109,7 @@ public class SharedBlobCacheService<KeyType> implements Releasable {
     public static final Setting<RelativeByteSizeValue> SHARED_CACHE_SIZE_SETTING = new Setting<>(
         new Setting.SimpleKey(SHARED_CACHE_SETTINGS_PREFIX + "size"),
         (settings) -> {
-            if (DiscoveryNode.isDedicatedFrozenNode(settings) || DiscoveryNode.hasRole(settings, DiscoveryNodeRole.SEARCH_ROLE)) {
+            if (DiscoveryNode.isDedicatedFrozenNode(settings) || isSearchOrIndexingNode(settings)) {
                 return "90%";
             } else {
                 return ByteSizeValue.ZERO.getStringRep();
@@ -132,9 +132,12 @@ public class SharedBlobCacheService<KeyType> implements Releasable {
                     @SuppressWarnings("unchecked")
                     final List<DiscoveryNodeRole> roles = (List<DiscoveryNodeRole>) settings.get(NodeRoleSettings.NODE_ROLES_SETTING);
                     final var rolesSet = Set.copyOf(roles);
-                    if (DataTier.isFrozenNode(rolesSet) == false && rolesSet.contains(DiscoveryNodeRole.SEARCH_ROLE) == false) {
+                    if (DataTier.isFrozenNode(rolesSet) == false
+                        && rolesSet.contains(DiscoveryNodeRole.SEARCH_ROLE) == false
+                        && rolesSet.contains(DiscoveryNodeRole.INDEX_ROLE) == false) {
                         throw new SettingsException(
-                            "setting [{}] to be positive [{}] is only permitted on nodes with the data_frozen role, roles are [{}]",
+                            "Setting [{}] to be positive [{}] is only permitted on nodes with the data_frozen, search, or indexing role."
+                                + " Roles are [{}]",
                             SHARED_CACHE_SETTINGS_PREFIX + "size",
                             value.getStringRep(),
                             roles.stream().map(DiscoveryNodeRole::roleName).collect(Collectors.joining(","))
@@ -164,11 +167,16 @@ public class SharedBlobCacheService<KeyType> implements Releasable {
         Setting.Property.NodeScope
     );
 
+    private static boolean isSearchOrIndexingNode(Settings settings) {
+        return DiscoveryNode.hasRole(settings, DiscoveryNodeRole.SEARCH_ROLE)
+            || DiscoveryNode.hasRole(settings, DiscoveryNodeRole.INDEX_ROLE);
+    }
+
     public static final Setting<ByteSizeValue> SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING = new Setting<>(
         new Setting.SimpleKey(SHARED_CACHE_SETTINGS_PREFIX + "size.max_headroom"),
         (settings) -> {
             if (SHARED_CACHE_SIZE_SETTING.exists(settings) == false
-                && (DiscoveryNode.isDedicatedFrozenNode(settings) || DiscoveryNode.hasRole(settings, DiscoveryNodeRole.SEARCH_ROLE))) {
+                && (DiscoveryNode.isDedicatedFrozenNode(settings) || isSearchOrIndexingNode(settings))) {
                 return "100GB";
             }
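
A minimal sketch of how the role-dependent default now resolves, mirroring testSearchOrIndexNodeCacheSizeDefaults in the test file below; the setting keys and classes are the ones touched by this diff:

import org.elasticsearch.blobcache.shared.SharedBlobCacheService;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.RelativeByteSizeValue;
import org.elasticsearch.node.NodeRoleSettings;

class CacheSizeDefaultSketch {
    // Resolves to "90%" for search, index and dedicated frozen nodes, "0b" otherwise.
    static RelativeByteSizeValue defaultCacheSizeFor(DiscoveryNodeRole role) {
        Settings settings = Settings.builder()
            .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), role.roleName())
            .build();
        return SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.get(settings);
    }
}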
 

+ 7 - 4
x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java

@@ -230,11 +230,11 @@ public class SharedBlobCacheServiceTests extends ESTestCase {
         assertThat(
             e.getCause().getMessage(),
             is(
-                "setting ["
+                "Setting ["
                     + SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey()
                     + "] to be positive ["
                     + cacheSize
-                    + "] is only permitted on nodes with the data_frozen role, roles are [data_hot]"
+                    + "] is only permitted on nodes with the data_frozen, search, or indexing role. Roles are [data_hot]"
             )
         );
     }
@@ -307,9 +307,12 @@ public class SharedBlobCacheServiceTests extends ESTestCase {
         assertThat(SharedBlobCacheService.SHARED_CACHE_SIZE_MAX_HEADROOM_SETTING.get(settings), equalTo(ByteSizeValue.ofBytes(-1)));
     }
 
-    public void testSearchNodeCacheSizeDefaults() {
+    public void testSearchOrIndexNodeCacheSizeDefaults() {
         final Settings settings = Settings.builder()
-            .putList(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), DiscoveryNodeRole.SEARCH_ROLE.roleName())
+            .putList(
+                NodeRoleSettings.NODE_ROLES_SETTING.getKey(),
+                randomFrom(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.INDEX_ROLE).roleName()
+            )
             .build();
 
         RelativeByteSizeValue relativeCacheSize = SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.get(settings);

+ 4 - 4
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java

@@ -48,7 +48,7 @@ public class RemoteClusterFeatureSetUsage extends XPackFeatureSet.Usage {
         builder.field("size", size);
 
         int numberOfSniffModes = 0;
-        int numberOfConfigurableModels = 0;
+        int numberOfApiKeySecured = 0;
         for (var info : remoteConnectionInfos) {
             if ("sniff".equals(info.getModeInfo().modeName())) {
                 numberOfSniffModes += 1;
@@ -56,7 +56,7 @@ public class RemoteClusterFeatureSetUsage extends XPackFeatureSet.Usage {
                 assert "proxy".equals(info.getModeInfo().modeName());
             }
             if (info.hasClusterCredentials()) {
-                numberOfConfigurableModels += 1;
+                numberOfApiKeySecured += 1;
             }
         }
 
@@ -66,8 +66,8 @@ public class RemoteClusterFeatureSetUsage extends XPackFeatureSet.Usage {
         builder.endObject();
 
         builder.startObject("security");
-        builder.field("basic", size - numberOfConfigurableModels);
-        builder.field("configurable", numberOfConfigurableModels);
+        builder.field("cert", size - numberOfApiKeySecured);
+        builder.field("api_key", numberOfApiKeySecured);
         builder.endObject();
     }
 }

+ 80 - 32
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsAction.java

@@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionType;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.common.xcontent.ChunkedToXContent;
 import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.ingest.IngestStats;
@@ -30,7 +29,6 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TrainedModelSizeSt
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -236,7 +234,7 @@ public class GetTrainedModelsStatsAction extends ActionType<GetTrainedModelsStat
         public static class Builder {
 
             private long totalModelCount;
-            private Map<String, Set<String>> expandedIdsWithAliases;
+            private Map<String, Set<String>> expandedModelIdsWithAliases;
             private Map<String, TrainedModelSizeStats> modelSizeStatsMap;
             private Map<String, IngestStats> ingestStatsMap;
             private Map<String, InferenceStats> inferenceStatsMap;
@@ -247,13 +245,13 @@ public class GetTrainedModelsStatsAction extends ActionType<GetTrainedModelsStat
                 return this;
             }
 
-            public Builder setExpandedIdsWithAliases(Map<String, Set<String>> expandedIdsWithAliases) {
-                this.expandedIdsWithAliases = expandedIdsWithAliases;
+            public Builder setExpandedModelIdsWithAliases(Map<String, Set<String>> expandedIdsWithAliases) {
+                this.expandedModelIdsWithAliases = expandedIdsWithAliases;
                 return this;
             }
 
-            public Map<String, Set<String>> getExpandedIdsWithAliases() {
-                return this.expandedIdsWithAliases;
+            public Map<String, Set<String>> getExpandedModelIdsWithAliases() {
+                return this.expandedModelIdsWithAliases;
             }
 
             public Builder setModelSizeStatsByModelId(Map<String, TrainedModelSizeStats> modelSizeStatsByModelId) {
@@ -276,36 +274,86 @@ public class GetTrainedModelsStatsAction extends ActionType<GetTrainedModelsStat
              * @param assignmentStatsMap map of model_id to assignment stats
              * @return the builder with inference stats map updated and assignment stats map set
              */
-            public Builder setDeploymentStatsByModelId(Map<String, AssignmentStats> assignmentStatsMap) {
+            public Builder setDeploymentStatsByDeploymentId(Map<String, AssignmentStats> assignmentStatsMap) {
                 this.assignmentStatsMap = assignmentStatsMap;
-                if (inferenceStatsMap == null) {
-                    inferenceStatsMap = Maps.newHashMapWithExpectedSize(assignmentStatsMap.size());
-                }
-                assignmentStatsMap.forEach(
-                    (modelId, assignmentStats) -> inferenceStatsMap.put(modelId, assignmentStats.getOverallInferenceStats())
-                );
                 return this;
             }
 
-            public Response build() {
-                List<TrainedModelStats> trainedModelStats = new ArrayList<>(expandedIdsWithAliases.size());
-                expandedIdsWithAliases.keySet().forEach(id -> {
-                    TrainedModelSizeStats modelSizeStats = modelSizeStatsMap.get(id);
-                    IngestStats ingestStats = ingestStatsMap.get(id);
-                    InferenceStats inferenceStats = inferenceStatsMap.get(id);
-                    AssignmentStats assignmentStats = assignmentStatsMap.get(id);
-                    trainedModelStats.add(
-                        new TrainedModelStats(
-                            id,
-                            modelSizeStats,
-                            ingestStats,
-                            ingestStats == null ? 0 : ingestStats.getPipelineStats().size(),
-                            inferenceStats,
-                            assignmentStats
-                        )
-                    );
+            public Response build(Map<String, Set<String>> modelToDeploymentIds) {
+                int numResponses = expandedModelIdsWithAliases.size();
+                // plus an extra response for every deployment after
+                // the first per model
+                for (var entry : modelToDeploymentIds.entrySet()) {
+                    assert expandedModelIdsWithAliases.containsKey(entry.getKey()); // model id
+                    assert entry.getValue().size() > 0; // must have a deployment
+                    numResponses += entry.getValue().size() - 1;
+                }
+
+                if (inferenceStatsMap == null) {
+                    inferenceStatsMap = Collections.emptyMap();
+                }
+
+                List<TrainedModelStats> trainedModelStats = new ArrayList<>(numResponses);
+                expandedModelIdsWithAliases.keySet().forEach(modelId -> {
+                    if (modelToDeploymentIds.containsKey(modelId) == false) { // not deployed
+                        TrainedModelSizeStats modelSizeStats = modelSizeStatsMap.get(modelId);
+                        IngestStats ingestStats = ingestStatsMap.get(modelId);
+                        InferenceStats inferenceStats = inferenceStatsMap.get(modelId);
+                        trainedModelStats.add(
+                            new TrainedModelStats(
+                                modelId,
+                                modelSizeStats,
+                                ingestStats,
+                                ingestStats == null ? 0 : ingestStats.getPipelineStats().size(),
+                                inferenceStats,
+                                null // no assignment stats for undeployed models
+                            )
+                        );
+                    } else {
+                        for (var deploymentId : modelToDeploymentIds.get(modelId)) {
+                            AssignmentStats assignmentStats = assignmentStatsMap.get(deploymentId);
+                            if (assignmentStats == null) {
+                                continue;
+                            }
+                            InferenceStats inferenceStats = assignmentStats.getOverallInferenceStats();
+                            IngestStats ingestStats = ingestStatsMap.get(deploymentId);
+                            if (ingestStats == null) {
+                                // look up by model id
+                                ingestStats = ingestStatsMap.get(modelId);
+                            }
+                            TrainedModelSizeStats modelSizeStats = modelSizeStatsMap.get(modelId);
+                            trainedModelStats.add(
+                                new TrainedModelStats(
+                                    modelId,
+                                    modelSizeStats,
+                                    ingestStats,
+                                    ingestStats == null ? 0 : ingestStats.getPipelineStats().size(),
+                                    inferenceStats,
+                                    assignmentStats
+                                )
+                            );
+                        }
+                    }
+                });
+
+                // Sort first by model id then by deployment id
+                trainedModelStats.sort((modelStats1, modelStats2) -> {
+                    var comparison = modelStats1.getModelId().compareTo(modelStats2.getModelId());
+                    if (comparison == 0) {
+                        var deploymentId1 = modelStats1.getDeploymentStats() == null
+                            ? null
+                            : modelStats1.getDeploymentStats().getDeploymentId();
+                        var deploymentId2 = modelStats2.getDeploymentStats() == null
+                            ? null
+                            : modelStats2.getDeploymentStats().getDeploymentId();
+
+                        assert deploymentId1 != null && deploymentId2 != null
+                            : "2 results for model " + modelStats1.getModelId() + " both should have deployment stats";
+
+                        comparison = deploymentId1.compareTo(deploymentId2);
+                    }
+                    return comparison;
                 });
-                trainedModelStats.sort(Comparator.comparing(TrainedModelStats::getModelId));
                 return new Response(new QueryPage<>(trainedModelStats, totalModelCount, RESULTS_FIELD));
             }
         }
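The comparator above is easiest to see in isolation. Below is a minimal sketch of the same ordering, sort by model id and then by deployment id, with a null-tolerant tiebreak standing in for the assertion; the record type and ids are illustrative, not the production classes:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Illustrative stand-in for TrainedModelStats rows (not the production type).
record Row(String modelId, String deploymentId) {}

class SortSketch {
    // Model id first; ties broken by deployment id, with undeployed rows (null) first.
    static final Comparator<Row> BY_MODEL_THEN_DEPLOYMENT = Comparator.comparing(Row::modelId)
        .thenComparing(Row::deploymentId, Comparator.nullsFirst(Comparator.naturalOrder()));

    public static void main(String[] args) {
        List<Row> rows = new ArrayList<>(List.of(
            new Row("model-b", "for-search"),
            new Row("model-b", "for-ingest"),
            new Row("model-a", null) // undeployed: no deployment stats
        ));
        rows.sort(BY_MODEL_THEN_DEPLOYMENT);
        // Prints model-a/null, model-b/for-ingest, model-b/for-search.
        rows.forEach(r -> System.out.println(r.modelId() + "/" + r.deploymentId()));
    }
}
```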

+ 5 - 1
x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-default_policy.json

@@ -4,7 +4,7 @@
       "actions": {
         "rollover": {
           "max_age": "30d",
-          "max_size": "3GB"
+          "max_primary_shard_size": "50gb"
         }
       }
     },
@@ -19,6 +19,10 @@
         }
       }
     },
+    "cold": {
+      "min_age": "30d",
+      "actions": {}
+    },
     "delete": {
       "min_age": "180d",
       "actions":{

+ 6 - 0
x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-final_pipeline.json

@@ -1,6 +1,12 @@
 {
   "description": "Built-in ingest pipeline applied by default as final pipeline to behavioral analytics event data streams.",
   "processors": [
+    {
+      "set": {
+        "field": "_routing",
+        "copy_from": "session.id"
+      }
+    },
     {
       "uri_parts": {
         "field": "page.url",

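The new `set` processor copies `session.id` into `_routing`, so every event of a session is indexed to the same shard (the template change further down enables custom routing on the data stream, and the settings change sorts the index by session). A hedged client-side sketch of the same effect; the stream name and session id are made up:

```java
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.xcontent.XContentType;

class RoutingSketch {
    // What the final pipeline does server-side, expressed on the client:
    // route the event document by its session id.
    static IndexRequest sessionRoutedEvent() {
        return new IndexRequest("behavioral_analytics-events-example") // hypothetical stream name
            .source("{\"session\":{\"id\":\"session-42\"}}", XContentType.JSON)
            .routing("session-42");
    }
}
```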
+ 10 - 75
x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-mappings.json

@@ -76,15 +76,12 @@
                   "type": "keyword"
                 },
                 "original": {
-                  "fields": {
-                    "text": {
-                      "type": "match_only_text"
-                    }
-                  },
-                  "type": "wildcard"
+                  "ignore_above": 1024,
+                  "type": "keyword"
                 },
                 "path": {
-                  "type": "wildcard"
+                  "ignore_above": 1024,
+                  "type": "keyword"
                 },
                 "port": {
                   "type": "long"
@@ -118,15 +115,12 @@
                   "type": "keyword"
                 },
                 "original": {
-                  "fields": {
-                    "text": {
-                      "type": "match_only_text"
-                    }
-                  },
-                  "type": "wildcard"
+                  "ignore_above": 1024,
+                  "type": "keyword"
                 },
                 "path": {
-                  "type": "wildcard"
+                  "ignore_above": 1024,
+                  "type": "keyword"
                 },
                 "port": {
                   "type": "long"
@@ -184,67 +178,8 @@
                   "type": "integer"
                 },
                 "items": {
-                  "properties": {
-                    "document": {
-                      "properties": {
-
-                      "index": {
-                        "type": "keyword",
-                        "ignore_above": 1024
-                      },
-                      "id": {
-                        "type": "keyword",
-                        "ignore_above": 1024
-                      }
-                    }
-                    },
-                    "page": {
-                      "properties": {
-                        "url": {
-                          "properties": {
-                            "domain": {
-                              "ignore_above": 1024,
-                              "type": "keyword"
-                            },
-                            "extension": {
-                              "ignore_above": 1024,
-                              "type": "keyword"
-                            },
-                            "fragment": {
-                              "ignore_above": 1024,
-                              "type": "keyword"
-                            },
-                            "original": {
-                              "fields": {
-                                "text": {
-                                  "type": "match_only_text"
-                                }
-                              },
-                              "type": "wildcard"
-                            },
-                            "path": {
-                              "type": "wildcard"
-                            },
-                            "port": {
-                              "type": "long"
-                            },
-                            "query": {
-                              "ignore_above": 1024,
-                              "type": "keyword"
-                            },
-                            "scheme": {
-                              "ignore_above": 1024,
-                              "type": "keyword"
-                            }
-                          }
-                        },
-                        "title": {
-                          "type": "keyword",
-                          "ignore_above": 1024
-                        }
-                      }
-                    }
-                  }
+                  "type": "object",
+                  "enabled": false
                 }
               }
             }

+ 5 - 1
x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-settings.json

@@ -10,7 +10,11 @@
         "number_of_shards": 1,
         "number_of_replicas": 0,
         "auto_expand_replicas": "0-1",
-        "final_pipeline": "behavioral_analytics-events-final_pipeline"
+        "final_pipeline": "behavioral_analytics-events-final_pipeline",
+        "sort": {
+          "field": ["session.id", "@timestamp"],
+          "order": ["asc", "asc"]
+        }
       }
     }
   },

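Combined with the session-based routing above, sorting the index by `session.id` then `@timestamp` keeps each session's events contiguous and in time order on disk. A small sketch of the same specification through the `Settings` builder, assuming the standard `index.sort.*` keys:

```java
import org.elasticsearch.common.settings.Settings;

class IndexSortSketch {
    // The JSON "sort" block above, expressed programmatically.
    static Settings sessionSortedIndexSettings() {
        return Settings.builder()
            .putList("index.sort.field", "session.id", "@timestamp")
            .putList("index.sort.order", "asc", "asc")
            .build();
    }
}
```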
+ 3 - 1
x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/entsearch/analytics/behavioral_analytics-events-template.json

@@ -1,6 +1,8 @@
 {
   "index_patterns": ["${event_data_stream.index_pattern}"],
-  "data_stream": {},
+  "data_stream": {
+    "allow_custom_routing": true
+  },
   "priority": 100,
   "composed_of": [
     "behavioral_analytics-events-settings",

+ 6 - 6
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsageTests.java

@@ -29,12 +29,12 @@ public class RemoteClusterFeatureSetUsageTests extends ESTestCase {
     public void testToXContent() throws IOException {
         final int numberOfRemoteClusters = randomIntBetween(0, 10);
         int numberOfSniffModes = 0;
-        int numberOfConfigurableModels = 0;
+        int numberOfApiKeySecured = 0;
         final List<RemoteConnectionInfo> infos = new ArrayList<>();
         for (int i = 0; i < numberOfRemoteClusters; i++) {
             final boolean hasCredentials = randomBoolean();
             if (hasCredentials) {
-                numberOfConfigurableModels += 1;
+                numberOfApiKeySecured += 1;
             }
             final RemoteConnectionInfo.ModeInfo modeInfo;
             if (randomBoolean()) {
@@ -68,15 +68,15 @@ public class RemoteClusterFeatureSetUsageTests extends ESTestCase {
                                     "sniff": %s
                                   },
                                   "security": {
-                                     "basic": %s,
-                                     "configurable": %s
+                                     "cert": %s,
+                                     "api_key": %s
                                   }
                                 }""",
                             numberOfRemoteClusters,
                             numberOfRemoteClusters - numberOfSniffModes,
                             numberOfSniffModes,
-                            numberOfRemoteClusters - numberOfConfigurableModels,
-                            numberOfConfigurableModels
+                            numberOfRemoteClusters - numberOfApiKeySecured,
+                            numberOfApiKeySecured
                         )
                     )
                 )

+ 5 - 4
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java

@@ -51,7 +51,7 @@ import org.elasticsearch.xpack.application.analytics.action.TransportDeleteAnaly
 import org.elasticsearch.xpack.application.analytics.action.TransportGetAnalyticsCollectionAction;
 import org.elasticsearch.xpack.application.analytics.action.TransportPostAnalyticsEventAction;
 import org.elasticsearch.xpack.application.analytics.action.TransportPutAnalyticsCollectionAction;
-import org.elasticsearch.xpack.application.analytics.ingest.BulkProcessorConfig;
+import org.elasticsearch.xpack.application.analytics.ingest.AnalyticsEventIngestConfig;
 import org.elasticsearch.xpack.application.search.SearchApplicationIndexService;
 import org.elasticsearch.xpack.application.search.action.DeleteSearchApplicationAction;
 import org.elasticsearch.xpack.application.search.action.GetSearchApplicationAction;
@@ -199,9 +199,10 @@ public class EnterpriseSearch extends Plugin implements ActionPlugin, SystemInde
     @Override
     public List<Setting<?>> getSettings() {
         return List.of(
-            BulkProcessorConfig.MAX_NUMBER_OF_EVENTS_PER_BULK_SETTING,
-            BulkProcessorConfig.FLUSH_DELAY_SETTING,
-            BulkProcessorConfig.MAX_NUMBER_OF_RETRIES_SETTING
+            AnalyticsEventIngestConfig.MAX_NUMBER_OF_EVENTS_PER_BULK_SETTING,
+            AnalyticsEventIngestConfig.FLUSH_DELAY_SETTING,
+            AnalyticsEventIngestConfig.MAX_NUMBER_OF_RETRIES_SETTING,
+            AnalyticsEventIngestConfig.MAX_BYTES_IN_FLIGHT_SETTING
         );
     }
 }

+ 17 - 3
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventEmitter.java

@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.application.analytics.ingest;
 
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.bulk.BulkProcessor2;
 import org.elasticsearch.action.index.IndexRequest;
@@ -18,6 +19,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
+import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.json.JsonXContent;
@@ -27,12 +29,13 @@ import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent;
 import org.elasticsearch.xpack.application.analytics.event.AnalyticsEventFactory;
 
 import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.elasticsearch.xpack.core.ClientHelper.ENT_SEARCH_ORIGIN;
 
 public class AnalyticsEventEmitter extends AbstractLifecycleComponent {
 
-    private static final Logger logger = LogManager.getLogger(AnalyticsEvent.class);
+    private static final Logger logger = LogManager.getLogger(AnalyticsEventEmitter.class);
 
     private final Client client;
 
@@ -40,6 +43,8 @@ public class AnalyticsEventEmitter extends AbstractLifecycleComponent {
 
     private final AnalyticsEventFactory eventFactory;
 
+    private final AtomicBoolean dropEvent = new AtomicBoolean(false);
+
     @Inject
     public AnalyticsEventEmitter(Client client, BulkProcessorFactory bulkProcessorFactory) {
         this(client, bulkProcessorFactory, AnalyticsEventFactory.INSTANCE);
@@ -67,6 +72,10 @@ public class AnalyticsEventEmitter extends AbstractLifecycleComponent {
 
             bulkProcessor.add(eventIndexRequest);
 
+            if (dropEvent.compareAndSet(true, false)) {
+                logger.warn("Bulk processor has been flushed. Accepting new events again.");
+            }
+
             if (request.isDebug()) {
                 listener.onResponse(new PostAnalyticsEventAction.DebugResponse(true, event));
             } else {
@@ -75,8 +84,13 @@ public class AnalyticsEventEmitter extends AbstractLifecycleComponent {
         } catch (IOException e) {
             listener.onFailure(new ElasticsearchException("Unable to parse the event.", e));
         } catch (EsRejectedExecutionException e) {
-            listener.onFailure(new ElasticsearchException("Unable to add the event to the bulk."));
-            logger.error("Unable to add the event to the bulk.", e);
+            listener.onFailure(
+                new ElasticsearchStatusException("Unable to add the event: too many requests.", RestStatus.TOO_MANY_REQUESTS)
+            );
+
+            if (dropEvent.compareAndSet(false, true)) {
+                logger.warn("Bulk processor is full. Dropping events.");
+            }
         }
     }
 

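The `dropEvent` flag implements a log-once-per-transition pattern: `compareAndSet` guarantees the warning fires once when the processor starts rejecting and once when it recovers, rather than once per event. A minimal generic sketch of that pattern (names and messages are illustrative):

```java
import java.util.concurrent.atomic.AtomicBoolean;

class DropStateLogger {
    private final AtomicBoolean dropping = new AtomicBoolean(false);

    void onEventAccepted() {
        // Only the first accepted event after a rejection logs.
        if (dropping.compareAndSet(true, false)) {
            System.out.println("WARN: bulk processor flushed, accepting events again");
        }
    }

    void onEventRejected() {
        // Only the first rejected event logs.
        if (dropping.compareAndSet(false, true)) {
            System.out.println("WARN: bulk processor is full, dropping events");
        }
    }
}
```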
+ 39 - 15
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/ingest/BulkProcessorConfig.java → x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventIngestConfig.java

@@ -10,6 +10,7 @@ package org.elasticsearch.xpack.application.analytics.ingest;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.TimeValue;
 
@@ -20,30 +21,46 @@ import org.elasticsearch.core.TimeValue;
- *  - max_events_per_bulk: the maximum number of events that can be added to the bulk before flushing the bulk (default: 1000)
- *  - max_number_of_retries: the maximum number of retries when bulk execution fails (default: 3)
+ *  - max_events_per_bulk: the maximum number of events that can be added to the bulk before flushing it (default: 500)
+ *  - max_number_of_retries: the maximum number of retries when bulk execution fails (default: 1)
  */
-public class BulkProcessorConfig {
-    private static final String SETTING_ROOT_PATH = "xpack.applications.behavioral_analytics.ingest.bulk_processor";
+public class AnalyticsEventIngestConfig {
+    private static final String SETTING_ROOT_PATH = "xpack.applications.behavioral_analytics.ingest";
 
+    private static final TimeValue DEFAULT_FLUSH_DELAY = TimeValue.timeValueSeconds(10);
+    private static final TimeValue MIN_FLUSH_DELAY = TimeValue.timeValueSeconds(1);
+    private static final TimeValue MAX_FLUSH_DELAY = TimeValue.timeValueSeconds(60);
     public static final Setting<TimeValue> FLUSH_DELAY_SETTING = Setting.timeSetting(
-        Strings.format("%s.%s", SETTING_ROOT_PATH, "flush_delay"),
-        TimeValue.timeValueSeconds(10),
-        TimeValue.timeValueSeconds(1),
-        TimeValue.timeValueSeconds(60),
+        Strings.format("%s.%s", SETTING_ROOT_PATH, "bulk_processor.flush_delay"),
+        DEFAULT_FLUSH_DELAY,
+        MIN_FLUSH_DELAY,
+        MAX_FLUSH_DELAY,
         Setting.Property.NodeScope
     );
 
+    private static final int DEFAULT_BULK_SIZE = 500;
+    private static final int MIN_BULK_SIZE = 1;
+    private static final int MAX_BULK_SIZE = 1000;
     public static final Setting<Integer> MAX_NUMBER_OF_EVENTS_PER_BULK_SETTING = Setting.intSetting(
-        Strings.format("%s.%s", SETTING_ROOT_PATH, "max_events_per_bulk"),
-        1000,
-        1,
-        10000,
+        Strings.format("%s.%s", SETTING_ROOT_PATH, "bulk_processor.max_events_per_bulk"),
+        DEFAULT_BULK_SIZE,
+        MIN_BULK_SIZE,
+        MAX_BULK_SIZE,
         Setting.Property.NodeScope
     );
 
+    private static final int DEFAULT_MAX_NUMBER_OF_RETRIES = 1;
+    private static final int MIN_MAX_NUMBER_OF_RETRIES = 0;
+    private static final int MAX_MAX_NUMBER_OF_RETRIES = 5;
     public static final Setting<Integer> MAX_NUMBER_OF_RETRIES_SETTING = Setting.intSetting(
-        Strings.format("%s.%s", SETTING_ROOT_PATH, "max_number_of_retries"),
-        3,
-        0,
-        5,
+        Strings.format("%s.%s", SETTING_ROOT_PATH, "bulk_processor.max_number_of_retries"),
+        DEFAULT_MAX_NUMBER_OF_RETRIES,
+        MIN_MAX_NUMBER_OF_RETRIES,
+        MAX_MAX_NUMBER_OF_RETRIES,
+        Setting.Property.NodeScope
+    );
+
+    private static final String DEFAULT_MAX_BYTES_IN_FLIGHT = "5%";
+    public static final Setting<ByteSizeValue> MAX_BYTES_IN_FLIGHT_SETTING = Setting.memorySizeSetting(
+        Strings.format("%s.%s", SETTING_ROOT_PATH, "bulk_processor.max_bytes_in_flight"),
+        DEFAULT_MAX_BYTES_IN_FLIGHT,
         Setting.Property.NodeScope
     );
 
@@ -53,11 +70,14 @@ public class BulkProcessorConfig {
 
     private final int maxNumberOfEventsPerBulk;
 
+    private final ByteSizeValue maxBytesInFlight;
+
     @Inject
-    public BulkProcessorConfig(Settings settings) {
+    public AnalyticsEventIngestConfig(Settings settings) {
         this.flushDelay = FLUSH_DELAY_SETTING.get(settings);
         this.maxNumberOfRetries = MAX_NUMBER_OF_RETRIES_SETTING.get(settings);
         this.maxNumberOfEventsPerBulk = MAX_NUMBER_OF_EVENTS_PER_BULK_SETTING.get(settings);
+        this.maxBytesInFlight = MAX_BYTES_IN_FLIGHT_SETTING.get(settings);
     }
 
     public TimeValue flushDelay() {
@@ -71,4 +91,8 @@ public class BulkProcessorConfig {
     public int maxNumberOfEventsPerBulk() {
         return maxNumberOfEventsPerBulk;
     }
+
+    public ByteSizeValue maxBytesInFlight() {
+        return maxBytesInFlight;
+    }
 }

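`Setting.memorySizeSetting` resolves a percentage default against the JVM heap, which is what the `5%` default above and the heap-relative assertions in the tests below rely on. A hedged sketch using the APIs already present in this diff; the setting key is hypothetical:

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.monitor.jvm.JvmInfo;

class MemorySizeSettingSketch {
    static final Setting<ByteSizeValue> EXAMPLE = Setting.memorySizeSetting(
        "example.max_bytes_in_flight", // hypothetical key, for illustration only
        "5%",
        Setting.Property.NodeScope
    );

    public static void main(String[] args) {
        long heap = JvmInfo.jvmInfo().getMem().getHeapMax().getBytes();
        // With no override, the setting resolves to roughly 5% of the max heap.
        System.out.println(EXAMPLE.get(Settings.EMPTY).getBytes() + " ~= " + (long) (0.05 * heap));
    }
}
```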
+ 3 - 5
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/ingest/BulkProcessorFactory.java

@@ -13,8 +13,6 @@ import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
 
@@ -28,10 +26,10 @@ import java.util.stream.Collectors;
 public class BulkProcessorFactory {
     private static final Logger logger = LogManager.getLogger(AnalyticsEventEmitter.class);
 
-    private final BulkProcessorConfig config;
+    private final AnalyticsEventIngestConfig config;
 
     @Inject
-    public BulkProcessorFactory(BulkProcessorConfig config) {
+    public BulkProcessorFactory(AnalyticsEventIngestConfig config) {
         this.config = config;
     }
 
@@ -39,8 +37,8 @@ public class BulkProcessorFactory {
         return BulkProcessor2.builder(client::bulk, new BulkProcessorListener(), client.threadPool())
             .setMaxNumberOfRetries(config.maxNumberOfRetries())
             .setBulkActions(config.maxNumberOfEventsPerBulk())
-            .setBulkSize(new ByteSizeValue(-1, ByteSizeUnit.BYTES))
             .setFlushInterval(config.flushDelay())
+            .setMaxBytesInFlight(config.maxBytesInFlight())
             .build();
     }
 

+ 7 - 3
x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventEmitterTests.java

@@ -9,12 +9,15 @@ package org.elasticsearch.xpack.application.analytics.ingest;
 
 import org.apache.logging.log4j.util.Strings;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.bulk.BulkProcessor2;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.application.analytics.action.PostAnalyticsEventAction;
 import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent;
@@ -129,6 +132,7 @@ public class AnalyticsEventEmitterTests extends ESTestCase {
 
         // Mocking the client used in the test.
         Client clientMock = mock(Client.class);
+        doReturn(mock(ThreadPool.class)).when(clientMock).threadPool();
 
         // Mocking the bulk processor used in the test.
         BulkProcessorFactory bulkProcessorFactoryMock = mock(BulkProcessorFactory.class);
@@ -148,9 +152,9 @@ public class AnalyticsEventEmitterTests extends ESTestCase {
         verify(listener, never()).onResponse(any());
 
         // Verify listener exception.
-        verify(listener).onFailure(argThat((Exception e) -> {
-            assertThat(e, instanceOf(ElasticsearchException.class));
-            assertThat(e.getMessage(), equalTo("Unable to add the event to the bulk."));
+        verify(listener).onFailure(argThat((ElasticsearchStatusException e) -> {
+            assertThat(e.status(), equalTo(RestStatus.TOO_MANY_REQUESTS));
+            assertThat(e.getMessage(), equalTo("Unable to add the event: too many requests."));
             return true;
         }));
     }

+ 35 - 18
x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/ingest/BulkProcessorConfigTests.java → x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/ingest/AnalyticsEventIngestConfigTests.java

@@ -8,31 +8,34 @@
 package org.elasticsearch.xpack.application.analytics.ingest;
 
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.test.ESTestCase;
 
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
-public class BulkProcessorConfigTests extends ESTestCase {
+public class AnalyticsEventIngestConfigTests extends ESTestCase {
 
     public void testDefaultConfig() {
-        BulkProcessorConfig config = new BulkProcessorConfig(Settings.EMPTY);
+        AnalyticsEventIngestConfig config = new AnalyticsEventIngestConfig(Settings.EMPTY);
 
         assertThat(config.flushDelay(), equalTo(TimeValue.timeValueSeconds(10)));
-        assertThat(config.maxNumberOfEventsPerBulk(), equalTo(1000));
-        assertThat(config.maxNumberOfRetries(), equalTo(3));
+        assertThat(config.maxNumberOfEventsPerBulk(), equalTo(500));
+        assertThat(config.maxNumberOfRetries(), equalTo(1));
+        assertThat(config.maxBytesInFlight(), equalTo(ByteSizeValue.ofBytes((long) (0.05 * heapSize()))));
     }
 
     public void testCustomFlushDelay() {
         String value = randomTimeValue(1, 60, "s");
-        BulkProcessorConfig config = createCustomConfig("flush_delay", value);
+        AnalyticsEventIngestConfig config = createCustomBulkProcessorConfig("flush_delay", value);
         assertThat(config.flushDelay(), equalTo(TimeValue.parseTimeValue(value, "flush_delay")));
     }
 
     public void testCustomFlushDelayTooLow() {
         String value = randomTimeValue(1, 60, "ms");
-        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomConfig("flush_delay", value));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomBulkProcessorConfig("flush_delay", value));
         assertThat(
             e.getMessage(),
             containsString("[xpack.applications.behavioral_analytics.ingest.bulk_processor.flush_delay], must be >= [1s]")
@@ -40,7 +43,7 @@ public class BulkProcessorConfigTests extends ESTestCase {
     }
 
     public void testCustomFlushDelayTooHigh() {
-        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomConfig("flush_delay", "61s"));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomBulkProcessorConfig("flush_delay", "61s"));
         assertThat(
             e.getMessage(),
             containsString("[xpack.applications.behavioral_analytics.ingest.bulk_processor.flush_delay], must be <= [60s]")
@@ -48,13 +51,13 @@ public class BulkProcessorConfigTests extends ESTestCase {
     }
 
     public void testCustomMaxNumberOfEventsPerBulk() {
-        int value = randomIntBetween(1, 10000);
-        BulkProcessorConfig config = createCustomConfig("max_events_per_bulk", String.valueOf(value));
+        int value = randomIntBetween(1, 1000);
+        AnalyticsEventIngestConfig config = createCustomBulkProcessorConfig("max_events_per_bulk", String.valueOf(value));
         assertThat(config.maxNumberOfEventsPerBulk(), equalTo(value));
     }
 
     public void testCustomMaxNumberOfEventsPerBulkTooLow() {
-        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomConfig("max_events_per_bulk", "0"));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomBulkProcessorConfig("max_events_per_bulk", "0"));
         assertThat(
             e.getMessage(),
             containsString("[xpack.applications.behavioral_analytics.ingest.bulk_processor.max_events_per_bulk] must be >= 1")
@@ -62,21 +65,21 @@ public class BulkProcessorConfigTests extends ESTestCase {
     }
 
     public void testCustomMaxNumberOfEventsPerBulkTooHigh() {
-        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomConfig("max_events_per_bulk", "10001"));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomBulkProcessorConfig("max_events_per_bulk", "1001"));
         assertThat(
             e.getMessage(),
-            containsString("[xpack.applications.behavioral_analytics.ingest.bulk_processor.max_events_per_bulk] must be <= 10000")
+            containsString("[xpack.applications.behavioral_analytics.ingest.bulk_processor.max_events_per_bulk] must be <= 1000")
         );
     }
 
     public void testCustomMaxNumberOfRetries() {
         int value = randomIntBetween(1, 5);
-        BulkProcessorConfig config = createCustomConfig("max_number_of_retries", String.valueOf(value));
+        AnalyticsEventIngestConfig config = createCustomBulkProcessorConfig("max_number_of_retries", String.valueOf(value));
         assertThat(config.maxNumberOfRetries(), equalTo(value));
     }
 
     public void testCustomMaxNumberOfRetriesTooLow() {
-        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomConfig("max_number_of_retries", "-1"));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomBulkProcessorConfig("max_number_of_retries", "-1"));
         assertThat(
             e.getMessage(),
             containsString("[xpack.applications.behavioral_analytics.ingest.bulk_processor.max_number_of_retries] must be >= 0")
@@ -84,15 +87,29 @@ public class BulkProcessorConfigTests extends ESTestCase {
     }
 
     public void testCustomMaxNumberOfRetriesTooHigh() {
-        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomConfig("max_number_of_retries", "6"));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> createCustomBulkProcessorConfig("max_number_of_retries", "6"));
         assertThat(
             e.getMessage(),
             containsString("[xpack.applications.behavioral_analytics.ingest.bulk_processor.max_number_of_retries] must be <= 5")
         );
     }
 
-    private BulkProcessorConfig createCustomConfig(String key, String value) {
-        key = "xpack.applications.behavioral_analytics.ingest.bulk_processor." + key;
-        return new BulkProcessorConfig(Settings.builder().put(key, value).build());
+    public void testCustomMaxBytesInFlight() {
+        double value = randomIntBetween(1, 100);
+        AnalyticsEventIngestConfig config = createCustomBulkProcessorConfig("max_bytes_in_flight", value + "%");
+        assertThat(config.maxBytesInFlight(), equalTo(ByteSizeValue.ofBytes((long) (value / 100 * heapSize()))));
+    }
+
+    private AnalyticsEventIngestConfig createCustomBulkProcessorConfig(String key, String value) {
+        return createCustomConfig("bulk_processor." + key, value);
+    }
+
+    private AnalyticsEventIngestConfig createCustomConfig(String key, String value) {
+        key = "xpack.applications.behavioral_analytics.ingest." + key;
+        return new AnalyticsEventIngestConfig(Settings.builder().put(key, value).build());
+    }
+
+    private long heapSize() {
+        return JvmInfo.jvmInfo().getMem().getHeapMax().getBytes();
     }
 }

+ 7 - 3
x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/ingest/BulkProcessorFactoryTests.java

@@ -13,6 +13,7 @@ import org.elasticsearch.action.bulk.BulkProcessor2;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;
@@ -48,7 +49,8 @@ public class BulkProcessorFactoryTests extends ESTestCase {
     }
 
     public void testFlushDelay() throws Exception {
-        BulkProcessorConfig config = mock(BulkProcessorConfig.class);
+        AnalyticsEventIngestConfig config = mock(AnalyticsEventIngestConfig.class);
+        doReturn(ByteSizeValue.ofMb(10)).when(config).maxBytesInFlight();
         doReturn(TimeValue.timeValueSeconds(1)).when(config).flushDelay();
         doReturn(10).when(config).maxNumberOfEventsPerBulk();
 
@@ -70,8 +72,9 @@ public class BulkProcessorFactoryTests extends ESTestCase {
         int maxBulkActions = randomIntBetween(1, 10);
         int totalEvents = randomIntBetween(1, 5) * maxBulkActions + randomIntBetween(1, maxBulkActions);
 
-        BulkProcessorConfig config = mock(BulkProcessorConfig.class);
+        AnalyticsEventIngestConfig config = mock(AnalyticsEventIngestConfig.class);
         doReturn(maxBulkActions).when(config).maxNumberOfEventsPerBulk();
+        doReturn(ByteSizeValue.ofMb(10)).when(config).maxBytesInFlight();
 
         Client client = mock(Client.class);
         InOrder inOrder = Mockito.inOrder(client);
@@ -102,9 +105,10 @@ public class BulkProcessorFactoryTests extends ESTestCase {
 
     public void testMaxRetries() {
         int numberOfRetries = between(0, 5);
-        BulkProcessorConfig config = mock(BulkProcessorConfig.class);
+        AnalyticsEventIngestConfig config = mock(AnalyticsEventIngestConfig.class);
         doReturn(1).when(config).maxNumberOfEventsPerBulk();
         doReturn(numberOfRetries).when(config).maxNumberOfRetries();
+        doReturn(ByteSizeValue.ofMb(10)).when(config).maxBytesInFlight();
 
         Client client = mock(Client.class);
         doAnswer(i -> {

+ 68 - 6
x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java

@@ -6,10 +6,13 @@
  */
 package org.elasticsearch.xpack.ml.packageloader;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.bootstrap.BootstrapCheck;
+import org.elasticsearch.bootstrap.BootstrapContext;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.xpack.core.ml.packageloader.action.GetTrainedModelPackageConfigAction;
@@ -17,12 +20,17 @@ import org.elasticsearch.xpack.core.ml.packageloader.action.LoadTrainedModelPack
 import org.elasticsearch.xpack.ml.packageloader.action.TransportGetTrainedModelPackageConfigAction;
 import org.elasticsearch.xpack.ml.packageloader.action.TransportLoadTrainedModelPackage;
 
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Locale;
+import java.util.Set;
 
-public class MachineLearningPackageLoader extends Plugin implements ActionPlugin {
+import static org.elasticsearch.core.Strings.format;
 
-    private final Settings settings;
+public class MachineLearningPackageLoader extends Plugin implements ActionPlugin {
 
     public static final String DEFAULT_ML_MODELS_REPOSITORY = "https://ml-models.elastic.co";
     public static final Setting<String> MODEL_REPOSITORY = Setting.simpleString(
@@ -35,9 +43,13 @@ public class MachineLearningPackageLoader extends Plugin implements ActionPlugin
     // re-using thread pool setup by the ml plugin
     public static final String UTILITY_THREAD_POOL_NAME = "ml_utility";
 
-    public MachineLearningPackageLoader(Settings settings) {
-        this.settings = settings;
-    }
+    private static final String MODEL_REPOSITORY_DOCUMENTATION_LINK = format(
+        "https://www.elastic.co/guide/en/machine-learning/%d.%d/ml-nlp-deploy-models.html#ml-nlp-deploy-model-air-gaped",
+        Version.CURRENT.major,
+        Version.CURRENT.minor
+    );
+
+    public MachineLearningPackageLoader() {}
 
     @Override
     public List<Setting<?>> getSettings() {
@@ -52,4 +64,54 @@ public class MachineLearningPackageLoader extends Plugin implements ActionPlugin
             new ActionHandler<>(LoadTrainedModelPackageAction.INSTANCE, TransportLoadTrainedModelPackage.class)
         );
     }
+
+    @Override
+    public List<BootstrapCheck> getBootstrapChecks() {
+        return List.of(new BootstrapCheck() {
+            @Override
+            public BootstrapCheckResult check(BootstrapContext context) {
+                try {
+                    validateModelRepository(MODEL_REPOSITORY.get(context.settings()), context.environment().configFile());
+                } catch (Exception e) {
+                    return BootstrapCheckResult.failure(
+                        "Found an invalid configuration for xpack.ml.model_repository. "
+                            + e.getMessage()
+                            + ". See "
+                            + MODEL_REPOSITORY_DOCUMENTATION_LINK
+                            + " for more information."
+                    );
+                }
+                return BootstrapCheckResult.success();
+            }
+
+            @Override
+            public boolean alwaysEnforce() {
+                return true;
+            }
+        });
+    }
+
+    static void validateModelRepository(String repository, Path configPath) throws URISyntaxException {
+        URI baseUri = new URI(repository.endsWith("/") ? repository : repository + "/").normalize();
+        URI normalizedConfigUri = configPath.toUri().normalize();
+
+        if (Strings.isNullOrEmpty(baseUri.getScheme())) {
+            throw new IllegalArgumentException(
+                "xpack.ml.model_repository must contain a scheme, supported schemes are \"http\", \"https\", \"file\""
+            );
+        }
+
+        final String scheme = baseUri.getScheme().toLowerCase(Locale.ROOT);
+        if (Set.of("http", "https", "file").contains(scheme) == false) {
+            throw new IllegalArgumentException(
+                "xpack.ml.model_repository must be configured with one of the following schemes: \"http\", \"https\", \"file\""
+            );
+        }
+
+        if (scheme.equals("file") && (baseUri.getPath().startsWith(normalizedConfigUri.getPath()) == false)) {
+            throw new IllegalArgumentException(
+                "If xpack.ml.model_repository is a file location, it must be placed below the configuration: " + normalizedConfigUri
+            );
+        }
+    }
 }

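The file-scheme branch of the bootstrap check works by normalizing both URIs and then requiring a path-prefix match, so `..` segments cannot escape the config directory. A self-contained sketch of that containment logic using only `java.net.URI`; the paths are illustrative:

```java
import java.net.URI;
import java.net.URISyntaxException;

class RepoContainmentSketch {
    // Normalize both URIs, then require the repository path to sit below the config path.
    static boolean isBelowConfig(String repository, URI configUri) throws URISyntaxException {
        URI base = new URI(repository.endsWith("/") ? repository : repository + "/").normalize();
        return base.getPath().startsWith(configUri.normalize().getPath());
    }

    public static void main(String[] args) throws URISyntaxException {
        URI config = URI.create("file:///home/elk/elasticsearch/");
        System.out.println(isBelowConfig("file:///home/elk/elasticsearch/models", config)); // true
        System.out.println(isBelowConfig("file:///etc/passwd", config));                    // false
        System.out.println(isBelowConfig("file:///home/elk/elasticsearch/../..", config));  // false, ".." is normalized away
    }
}
```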
+ 24 - 1
x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java

@@ -10,6 +10,7 @@ package org.elasticsearch.xpack.ml.packageloader.action;
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.SpecialPermission;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.hash.MessageDigests;
 import org.elasticsearch.common.io.Streams;
@@ -28,6 +29,7 @@ import java.io.InputStream;
 import java.io.UncheckedIOException;
 import java.net.HttpURLConnection;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.nio.file.Files;
 import java.security.AccessController;
 import java.security.MessageDigest;
@@ -93,6 +95,8 @@ final class ModelLoaderUtils {
 
     static InputStream getInputStreamFromModelRepository(URI uri) throws IOException {
         String scheme = uri.getScheme().toLowerCase(Locale.ROOT);
+
+        // if you add a scheme here, also add it to the bootstrap check in {@link MachineLearningPackageLoader#validateModelRepository}
         switch (scheme) {
             case "http":
             case "https":
@@ -104,7 +108,7 @@ final class ModelLoaderUtils {
         }
     }
 
-    public static Tuple<List<String>, List<String>> loadVocabulary(URI uri) {
+    static Tuple<List<String>, List<String>> loadVocabulary(URI uri) {
         try {
             InputStream vocabInputStream = getInputStreamFromModelRepository(uri);
 
@@ -132,6 +136,25 @@ final class ModelLoaderUtils {
         }
     }
 
+    static URI resolvePackageLocation(String repository, String artefact) throws URISyntaxException {
+        URI baseUri = new URI(repository.endsWith("/") ? repository : repository + "/").normalize();
+        URI resolvedUri = baseUri.resolve(artefact).normalize();
+
+        if (Strings.isNullOrEmpty(baseUri.getScheme())) {
+            throw new IllegalArgumentException("Repository must contain a scheme");
+        }
+
+        if (baseUri.getScheme().equals(resolvedUri.getScheme()) == false) {
+            throw new IllegalArgumentException("Illegal schema change in package location");
+        }
+
+        if (resolvedUri.getPath().startsWith(baseUri.getPath()) == false) {
+            throw new IllegalArgumentException("Illegal path in package location");
+        }
+
+        return baseUri.resolve(artefact);
+    }
+
     private ModelLoaderUtils() {}
 
     @SuppressWarnings("'java.lang.SecurityManager' is deprecated and marked for removal ")

+ 2 - 1
x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportGetTrainedModelPackageConfigAction.java

@@ -72,12 +72,13 @@ public class TransportGetTrainedModelPackageConfigAction extends TransportMaster
     @Override
     protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
         String repository = MachineLearningPackageLoader.MODEL_REPOSITORY.get(settings);
+
         String packagedModelId = request.getPackagedModelId();
         logger.trace(() -> format("Fetch package manifest for [%s] from [%s]", packagedModelId, repository));
 
         threadPool.executor(MachineLearningPackageLoader.UTILITY_THREAD_POOL_NAME).execute(() -> {
             try {
-                URI uri = new URI(repository).resolve(packagedModelId + ModelLoaderUtils.METADATA_FILE_EXTENSION);
+                URI uri = ModelLoaderUtils.resolvePackageLocation(repository, packagedModelId + ModelLoaderUtils.METADATA_FILE_EXTENSION);
                 InputStream inputStream = ModelLoaderUtils.getInputStreamFromModelRepository(uri);
 
                 try (

+ 0 - 2
x-pack/plugin/ml-package-loader/src/main/plugin-metadata/plugin-security.policy

@@ -8,6 +8,4 @@
 
 grant {
   permission java.net.SocketPermission "*", "connect";
-
-  permission java.io.FilePermission "<<ALL FILES>>", "read";
 };

+ 56 - 0
x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoaderTests.java

@@ -0,0 +1,56 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ml.packageloader;
+
+import org.elasticsearch.core.PathUtils;
+import org.elasticsearch.test.ESTestCase;
+
+public class MachineLearningPackageLoaderTests extends ESTestCase {
+
+    public void testValidateModelRepository() {
+        Exception e = expectThrows(
+            IllegalArgumentException.class,
+            () -> MachineLearningPackageLoader.validateModelRepository("file:///etc/passwd", PathUtils.get("/home/elk/elasticsearch"))
+        );
+
+        assertEquals(
+            "If xpack.ml.model_repository is a file location, it must be placed below the configuration: file:///home/elk/elasticsearch",
+            e.getMessage()
+        );
+
+        e = expectThrows(
+            IllegalArgumentException.class,
+            () -> MachineLearningPackageLoader.validateModelRepository("file:///home/elk/", PathUtils.get("/home/elk/elasticsearch"))
+        );
+
+        assertEquals(
+            "If xpack.ml.model_repository is a file location, it must be placed below the configuration: file:///home/elk/elasticsearch",
+            e.getMessage()
+        );
+
+        e = expectThrows(
+            IllegalArgumentException.class,
+            () -> MachineLearningPackageLoader.validateModelRepository("elk/", PathUtils.get("/home/elk/elasticsearch"))
+        );
+
+        assertEquals(
+            "xpack.ml.model_repository must contain a scheme, supported schemes are \"http\", \"https\", \"file\"",
+            e.getMessage()
+        );
+
+        e = expectThrows(
+            IllegalArgumentException.class,
+            () -> MachineLearningPackageLoader.validateModelRepository("mqtt://elky/", PathUtils.get("/home/elk/elasticsearch"))
+        );
+
+        assertEquals(
+            "xpack.ml.model_repository must be configured with one of the following schemes: \"http\", \"https\", \"file\"",
+            e.getMessage()
+        );
+    }
+}

+ 60 - 0
x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtilsTests.java

@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ml.packageloader.action;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+public class ModelLoaderUtilsTests extends ESTestCase {
+
+    public void testResolvePackageLocationTrailingSlash() throws URISyntaxException {
+        assertEquals(new URI("file:/home/ml/package.ext"), ModelLoaderUtils.resolvePackageLocation("file:///home/ml", "package.ext"));
+        assertEquals(new URI("file:/home/ml/package.ext"), ModelLoaderUtils.resolvePackageLocation("file:///home/ml/", "package.ext"));
+        assertEquals(
+            new URI("http://my-package.repo/package.ext"),
+            ModelLoaderUtils.resolvePackageLocation("http://my-package.repo", "package.ext")
+        );
+        assertEquals(
+            new URI("http://my-package.repo/package.ext"),
+            ModelLoaderUtils.resolvePackageLocation("http://my-package.repo/", "package.ext")
+        );
+        assertEquals(
+            new URI("http://my-package.repo/sub/package.ext"),
+            ModelLoaderUtils.resolvePackageLocation("http://my-package.repo/sub", "package.ext")
+        );
+        assertEquals(
+            new URI("http://my-package.repo/sub/package.ext"),
+            ModelLoaderUtils.resolvePackageLocation("http://my-package.repo/sub/", "package.ext")
+        );
+    }
+
+    public void testResolvePackageLocationBreakout() {
+        Exception e = expectThrows(
+            IllegalArgumentException.class,
+            () -> ModelLoaderUtils.resolvePackageLocation("file:///home/ml/", "../package.ext")
+        );
+
+        assertEquals("Illegal path in package location", e.getMessage());
+
+        e = expectThrows(
+            IllegalArgumentException.class,
+            () -> ModelLoaderUtils.resolvePackageLocation("file:///home/ml/", "http://foo.ba")
+        );
+        assertEquals("Illegal schema change in package location", e.getMessage());
+    }
+
+    public void testResolvePackageLocationNoSchemeInRepository() {
+        Exception e = expectThrows(
+            IllegalArgumentException.class,
+            () -> ModelLoaderUtils.resolvePackageLocation("/home/ml/", "package.ext")
+        );
+        assertEquals("Repository must contain a scheme", e.getMessage());
+    }
+}

+ 4 - 1
x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlDailyMaintenanceServiceIT.java

@@ -56,7 +56,10 @@ public class MlDailyMaintenanceServiceIT extends MlNativeAutodetectIntegTestCase
             threadPool,
             client(),
             mock(ClusterService.class),
-            mock(MlAssignmentNotifier.class)
+            mock(MlAssignmentNotifier.class),
+            true,
+            true,
+            true
         );
 
         putJob("maintenance-test-1");

+ 141 - 6
x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MultipleDeploymentsIT.java

@@ -8,18 +8,24 @@
 package org.elasticsearch.xpack.ml.integration;
 
 import org.elasticsearch.client.Response;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.xpack.core.ml.utils.MapHelper;
 
 import java.io.IOException;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.hasSize;
 
 public class MultipleDeploymentsIT extends PyTorchModelRestTestCase {
 
     @SuppressWarnings("unchecked")
     public void testDeployModelMultipleTimes() throws IOException {
         String baseModelId = "base-model";
-        createPassThroughModel(baseModelId);
-        putModelDefinition(baseModelId);
-        putVocabulary(List.of("these", "are", "my", "words"), baseModelId);
+        putAllModelParts(baseModelId);
 
         String forSearch = "for-search";
         startWithDeploymentId(baseModelId, forSearch);
@@ -35,12 +41,141 @@ public class MultipleDeploymentsIT extends PyTorchModelRestTestCase {
         inference = infer("my words", forIngest);
         assertOK(inference);
 
-        // TODO
-        // assertInferenceCount(1, forSearch);
-        // assertInferenceCount(2, forIngest);
+        assertInferenceCount(1, forSearch);
+        assertInferenceCount(2, forIngest);
 
         stopDeployment(forSearch);
         stopDeployment(forIngest);
+
+        Response statsResponse = getTrainedModelStats("_all");
+        Map<String, Object> stats = entityAsMap(statsResponse);
+        List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+        assertThat(stats.toString(), trainedModelStats, hasSize(2));
+
+        for (var statsMap : trainedModelStats) {
+            // no deployment stats when the deployment is stopped
+            assertNull(stats.toString(), statsMap.get("deployment_stats"));
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testGetStats() throws IOException {
+        String undeployedModel1 = "undeployed_1";
+        putAllModelParts(undeployedModel1);
+        String undeployedModel2 = "undeployed_2";
+        putAllModelParts(undeployedModel2);
+
+        String modelWith1Deployment = "model-with-1-deployment";
+        putAllModelParts(modelWith1Deployment);
+
+        String modelWith2Deployments = "model-with-2-deployments";
+        putAllModelParts(modelWith2Deployments);
+        String forSearchDeployment = "for-search";
+        startWithDeploymentId(modelWith2Deployments, forSearchDeployment);
+        String forIngestDeployment = "for-ingest";
+        startWithDeploymentId(modelWith2Deployments, forIngestDeployment);
+
+        // deployment Id is the same as model
+        startDeployment(modelWith1Deployment);
+
+        {
+            Map<String, Object> stats = entityAsMap(getTrainedModelStats("_all"));
+            List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+            checkExpectedStats(
+                List.of(
+                    new Tuple<>(undeployedModel1, null),
+                    new Tuple<>(undeployedModel2, null),
+                    new Tuple<>(modelWith1Deployment, modelWith1Deployment),
+                    new Tuple<>(modelWith2Deployments, forSearchDeployment),
+                    new Tuple<>(modelWith2Deployments, forIngestDeployment)
+                ),
+                trainedModelStats,
+                true
+            );
+
+            // check the sorted order
+            assertEquals(trainedModelStats.get(0).get("model_id"), "lang_ident_model_1");
+            assertEquals(trainedModelStats.get(1).get("model_id"), modelWith1Deployment);
+            assertEquals(MapHelper.dig("deployment_stats.deployment_id", trainedModelStats.get(1)), modelWith1Deployment);
+            assertEquals(trainedModelStats.get(2).get("model_id"), modelWith2Deployments);
+            assertEquals(MapHelper.dig("deployment_stats.deployment_id", trainedModelStats.get(2)), forIngestDeployment);
+            assertEquals(trainedModelStats.get(3).get("model_id"), modelWith2Deployments);
+            assertEquals(MapHelper.dig("deployment_stats.deployment_id", trainedModelStats.get(3)), forSearchDeployment);
+            assertEquals(trainedModelStats.get(4).get("model_id"), undeployedModel1);
+            assertEquals(trainedModelStats.get(5).get("model_id"), undeployedModel2);
+        }
+        {
+            Map<String, Object> stats = entityAsMap(getTrainedModelStats(modelWith1Deployment));
+            List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+            checkExpectedStats(List.of(new Tuple<>(modelWith1Deployment, modelWith1Deployment)), trainedModelStats);
+        }
+        {
+            Map<String, Object> stats = entityAsMap(getTrainedModelStats(modelWith2Deployments));
+            List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+            checkExpectedStats(
+                List.of(new Tuple<>(modelWith2Deployments, forSearchDeployment), new Tuple<>(modelWith2Deployments, forIngestDeployment)),
+                trainedModelStats
+            );
+        }
+        {
+            Map<String, Object> stats = entityAsMap(getTrainedModelStats(forIngestDeployment));
+            List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+            checkExpectedStats(List.of(new Tuple<>(modelWith2Deployments, forIngestDeployment)), trainedModelStats);
+        }
+        {
+            // wildcard model id matching
+            Map<String, Object> stats = entityAsMap(getTrainedModelStats("model-with-*"));
+            List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+            checkExpectedStats(
+                List.of(
+                    new Tuple<>(modelWith1Deployment, modelWith1Deployment),
+                    new Tuple<>(modelWith2Deployments, forSearchDeployment),
+                    new Tuple<>(modelWith2Deployments, forIngestDeployment)
+                ),
+                trainedModelStats
+            );
+        }
+        {
+            // wildcard deployment id matching
+            Map<String, Object> stats = entityAsMap(getTrainedModelStats("for-*"));
+            List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+            checkExpectedStats(
+                List.of(new Tuple<>(modelWith2Deployments, forSearchDeployment), new Tuple<>(modelWith2Deployments, forIngestDeployment)),
+                trainedModelStats
+            );
+        }
+    }
+
+    private void checkExpectedStats(List<Tuple<String, String>> modelDeploymentPairs, List<Map<String, Object>> trainedModelStats) {
+        checkExpectedStats(modelDeploymentPairs, trainedModelStats, false);
+    }
+
+    private void checkExpectedStats(
+        List<Tuple<String, String>> modelDeploymentPairs,
+        List<Map<String, Object>> trainedModelStats,
+        boolean plusOneForLangIdent
+    ) {
+        var concatenatedIds = new HashSet<String>();
+        modelDeploymentPairs.forEach(t -> concatenatedIds.add(t.v1() + t.v2()));
+
+        int expectedSize = modelDeploymentPairs.size();
+        if (plusOneForLangIdent) {
+            expectedSize++;
+        }
+        assertEquals(trainedModelStats.toString(), trainedModelStats.size(), expectedSize);
+        for (var tmStats : trainedModelStats) {
+            String modelId = (String) tmStats.get("model_id");
+            String deploymentId = (String) XContentMapValues.extractValue("deployment_stats.deployment_id", tmStats);
+            concatenatedIds.remove(modelId + deploymentId);
+        }
+
+        assertThat("Missing stats for " + concatenatedIds, concatenatedIds, empty());
+    }
+
+    private void putAllModelParts(String modelId) throws IOException {
+        createPassThroughModel(modelId);
+        putModelDefinition(modelId);
+        putVocabulary(List.of("these", "are", "my", "words"), modelId);
     }
 
     private void putModelDefinition(String modelId) throws IOException {

+ 19 - 9
x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java

@@ -60,7 +60,8 @@ public abstract class PyTorchModelRestTestCase extends ESRestTestCase {
                     "logger.org.elasticsearch.xpack.ml.inference.assignment" : "DEBUG",
                     "logger.org.elasticsearch.xpack.ml.inference.deployment" : "DEBUG",
                     "logger.org.elasticsearch.xpack.ml.inference.pytorch" : "DEBUG",
-                    "logger.org.elasticsearch.xpack.ml.process.logging" : "DEBUG"
+                    "logger.org.elasticsearch.xpack.ml.process.logging" : "DEBUG",
+                    "logger.org.elasticsearch.xpack.ml.action" : "DEBUG"
                 }}""");
         client().performRequest(loggingSettings);
     }
@@ -122,15 +123,24 @@ public abstract class PyTorchModelRestTestCase extends ESRestTestCase {
 
     @SuppressWarnings("unchecked")
     protected void assertInferenceCount(int expectedCount, String deploymentId) throws IOException {
-        Response noInferenceCallsStatsResponse = getTrainedModelStats(deploymentId);
-        Map<String, Object> stats = entityAsMap(noInferenceCallsStatsResponse);
+        Response statsResponse = getTrainedModelStats(deploymentId);
+        Map<String, Object> stats = entityAsMap(statsResponse);
+        List<Map<String, Object>> trainedModelStats = (List<Map<String, Object>>) stats.get("trained_model_stats");
+
+        boolean deploymentFound = false;
+        for (var statsMap : trainedModelStats) {
+            var deploymentStats = (Map<String, Object>) XContentMapValues.extractValue("deployment_stats", statsMap);
+            // find the matching deployment
+            if (deploymentId.equals(deploymentStats.get("deployment_id"))) {
+                List<Map<String, Object>> nodes = (List<Map<String, Object>>) XContentMapValues.extractValue("nodes", deploymentStats);
+                int inferenceCount = sumInferenceCountOnNodes(nodes);
+                assertEquals(stats.toString(), expectedCount, inferenceCount);
+                deploymentFound = true;
+                break;
+            }
+        }
 
-        List<Map<String, Object>> nodes = (List<Map<String, Object>>) XContentMapValues.extractValue(
-            "trained_model_stats.0.deployment_stats.nodes",
-            stats
-        );
-        int inferenceCount = sumInferenceCountOnNodes(nodes);
-        assertEquals(expectedCount, inferenceCount);
+        assertTrue("No deployment stats found for deployment [" + deploymentId + "]", deploymentFound);
     }
 
     protected int sumInferenceCountOnNodes(List<Map<String, Object>> nodes) {

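`XContentMapValues.extractValue` walks a dotted path through nested maps, which is how the rewritten assertion finds the matching deployment instead of hard-coding `trained_model_stats.0`. A quick sketch with a made-up stats entry:

```java
import org.elasticsearch.common.xcontent.support.XContentMapValues;

import java.util.List;
import java.util.Map;

class ExtractValueSketch {
    public static void main(String[] args) {
        Map<String, Object> statsEntry = Map.of( // made-up ids for illustration
            "model_id", "model-with-2-deployments",
            "deployment_stats", Map.of("deployment_id", "for-search", "nodes", List.of())
        );
        Object id = XContentMapValues.extractValue("deployment_stats.deployment_id", statsEntry);
        System.out.println(id); // for-search
    }
}
```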
+ 2 - 0
x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java

@@ -113,6 +113,7 @@ public class ChunkedTrainedModelPersisterIT extends MlSingleNodeTestCase {
             Collections.emptySet(),
             ModelAliasMetadata.EMPTY,
             null,
+            Collections.emptySet(),
             getIdsFuture
         );
         Tuple<Long, Map<String, Set<String>>> ids = getIdsFuture.actionGet();
@@ -184,6 +185,7 @@ public class ChunkedTrainedModelPersisterIT extends MlSingleNodeTestCase {
             Collections.emptySet(),
             ModelAliasMetadata.EMPTY,
             null,
+            Collections.emptySet(),
             getIdsFuture
         );
         Tuple<Long, Map<String, Set<String>>> ids = getIdsFuture.actionGet();

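These call sites track a signature change to `TrainedModelProvider.expandIds(...)`, which gains a set of matched deployment ids just before the listener (see the TransportGetTrainedModelsStatsAction hunks below). A hedged sketch of the updated call shape, with hypothetical argument values:

    trainedModelProvider.expandIds(
        "my-model-*",            // id expression (hypothetical)
        true,                    // allowNoResources
        null,                    // page params
        Collections.emptySet(),  // tags
        ModelAliasMetadata.EMPTY,
        null,                    // parent task id
        Collections.emptySet(),  // matched deployment ids (the new argument)
        getIdsFuture
    );
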
+ 15 - 0
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/DefaultMachineLearningExtension.java

@@ -17,4 +17,19 @@ public class DefaultMachineLearningExtension implements MachineLearningExtension
     public boolean includeNodeInfo() {
         return true;
     }
+
+    @Override
+    public boolean isAnomalyDetectionEnabled() {
+        return true;
+    }
+
+    @Override
+    public boolean isDataFrameAnalyticsEnabled() {
+        return true;
+    }
+
+    @Override
+    public boolean isNlpEnabled() {
+        return true;
+    }
 }

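Since all three new methods return true here, the default extension opts into every ML area and stock behavior is unchanged; only an alternative extension (sketched after the interface below) would switch features off. A trivial usage check, for illustration (run with -ea for the asserts):

    MachineLearningExtension ext = new DefaultMachineLearningExtension();
    assert ext.isAnomalyDetectionEnabled();
    assert ext.isDataFrameAnalyticsEnabled();
    assert ext.isNlpEnabled();
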
+ 245 - 196
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java

@@ -449,9 +449,7 @@ import org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService;
 import org.elasticsearch.xpack.ml.vectors.TextEmbeddingQueryVectorBuilder;
 
 import java.io.IOException;
-import java.time.Clock;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -460,8 +458,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Supplier;
 import java.util.function.UnaryOperator;
 
-import static java.util.Collections.emptyList;
-import static java.util.Collections.singletonList;
 import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
 import static org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX;
 import static org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX;
@@ -849,11 +845,6 @@ public class MachineLearning extends Plugin
         );
     }
 
-    // overridable by tests
-    protected Clock getClock() {
-        return Clock.systemUTC();
-    }
-
     @Override
     public Collection<Object> createComponents(
         Client client,
@@ -872,7 +863,7 @@ public class MachineLearning extends Plugin
     ) {
         if (enabled == false) {
             // special holder for @link(MachineLearningFeatureSetUsage) which needs access to job manager, empty if ML is disabled
-            return singletonList(new JobManagerHolder());
+            return List.of(new JobManagerHolder());
         }
 
         this.mlUpgradeModeActionFilter.set(new MlUpgradeModeActionFilter(clusterService));
@@ -1188,7 +1179,18 @@ public class MachineLearning extends Plugin
             new MlAutoscalingDeciderService(memoryTracker, settings, nodeAvailabilityZoneMapper, clusterService)
         );
 
-        return Arrays.asList(
+        MlInitializationService mlInitializationService = new MlInitializationService(
+            settings,
+            threadPool,
+            clusterService,
+            client,
+            mlAssignmentNotifier,
+            machineLearningExtension.get().isAnomalyDetectionEnabled(),
+            machineLearningExtension.get().isDataFrameAnalyticsEnabled(),
+            machineLearningExtension.get().isNlpEnabled()
+        );
+
+        return List.of(
             mlLifeCycleService,
             new MlControllerHolder(mlController),
             jobResultsProvider,
@@ -1198,7 +1200,7 @@ public class MachineLearning extends Plugin
             jobManager,
             jobManagerHolder,
             autodetectProcessManager,
-            new MlInitializationService(settings, threadPool, clusterService, client, mlAssignmentNotifier),
+            mlInitializationService,
             jobDataCountsPersister,
             datafeedRunner,
             datafeedManager,
@@ -1230,10 +1232,10 @@ public class MachineLearning extends Plugin
         IndexNameExpressionResolver expressionResolver
     ) {
         if (enabled == false) {
-            return emptyList();
+            return List.of();
         }
 
-        return Arrays.asList(
+        return List.of(
             new OpenJobPersistentTasksExecutor(
                 settings,
                 clusterService,
@@ -1280,207 +1282,254 @@ public class MachineLearning extends Plugin
         Supplier<DiscoveryNodes> nodesInCluster
     ) {
         if (false == enabled) {
-            return emptyList();
+            return List.of();
         }
-        return Arrays.asList(
-            new RestGetJobsAction(),
-            new RestGetJobStatsAction(),
-            new RestMlInfoAction(),
-            new RestMlMemoryAction(),
-            new RestPutJobAction(),
-            new RestPostJobUpdateAction(),
-            new RestDeleteJobAction(),
-            new RestOpenJobAction(),
-            new RestGetFiltersAction(),
-            new RestPutFilterAction(),
-            new RestUpdateFilterAction(),
-            new RestDeleteFilterAction(),
-            new RestGetInfluencersAction(),
-            new RestGetRecordsAction(),
-            new RestGetBucketsAction(),
-            new RestGetOverallBucketsAction(),
-            new RestPostDataAction(),
-            new RestCloseJobAction(),
-            new RestFlushJobAction(),
-            new RestResetJobAction(),
-            new RestValidateDetectorAction(),
-            new RestValidateJobConfigAction(),
-            new RestEstimateModelMemoryAction(),
-            new RestGetCategoriesAction(),
-            new RestGetModelSnapshotsAction(),
-            new RestRevertModelSnapshotAction(),
-            new RestUpdateModelSnapshotAction(),
-            new RestGetDatafeedsAction(),
-            new RestGetDatafeedStatsAction(),
-            new RestPutDatafeedAction(),
-            new RestUpdateDatafeedAction(),
-            new RestDeleteDatafeedAction(),
-            new RestPreviewDatafeedAction(),
-            new RestStartDatafeedAction(),
-            new RestStopDatafeedAction(),
-            new RestDeleteModelSnapshotAction(),
-            new RestDeleteExpiredDataAction(),
-            new RestForecastJobAction(),
-            new RestDeleteForecastAction(),
-            new RestGetCalendarsAction(),
-            new RestPutCalendarAction(),
-            new RestDeleteCalendarAction(),
-            new RestDeleteCalendarEventAction(),
-            new RestDeleteCalendarJobAction(),
-            new RestPutCalendarJobAction(),
-            new RestGetCalendarEventsAction(),
-            new RestPostCalendarEventAction(),
-            new RestSetUpgradeModeAction(),
-            new RestGetDataFrameAnalyticsAction(),
-            new RestGetDataFrameAnalyticsStatsAction(),
-            new RestPutDataFrameAnalyticsAction(),
-            new RestPostDataFrameAnalyticsUpdateAction(),
-            new RestDeleteDataFrameAnalyticsAction(),
-            new RestStartDataFrameAnalyticsAction(),
-            new RestStopDataFrameAnalyticsAction(),
-            new RestEvaluateDataFrameAction(),
-            new RestExplainDataFrameAnalyticsAction(),
-            new RestGetTrainedModelsAction(),
-            new RestDeleteTrainedModelAction(),
-            new RestGetTrainedModelsStatsAction(),
-            new RestPutTrainedModelAction(),
-            new RestUpgradeJobModelSnapshotAction(),
-            new RestGetJobModelSnapshotsUpgradeStatsAction(),
-            new RestPutTrainedModelAliasAction(),
-            new RestDeleteTrainedModelAliasAction(),
-            new RestPreviewDataFrameAnalyticsAction(),
-            new RestStartTrainedModelDeploymentAction(),
-            new RestStopTrainedModelDeploymentAction(),
-            new RestInferTrainedModelDeploymentAction(),
-            new RestUpdateTrainedModelDeploymentAction(),
-            new RestPutTrainedModelDefinitionPartAction(),
-            new RestPutTrainedModelVocabularyAction(),
-            new RestInferTrainedModelAction(),
-            new RestClearDeploymentCacheAction(),
-            // CAT Handlers
-            new RestCatJobsAction(),
-            new RestCatTrainedModelsAction(),
-            new RestCatDatafeedsAction(),
-            new RestCatDataFrameAnalyticsAction()
-        );
+        List<RestHandler> restHandlers = new ArrayList<>();
+        restHandlers.add(new RestMlInfoAction());
+        restHandlers.add(new RestMlMemoryAction());
+        restHandlers.add(new RestSetUpgradeModeAction());
+        if (machineLearningExtension.get().isAnomalyDetectionEnabled()) {
+            restHandlers.add(new RestGetJobsAction());
+            restHandlers.add(new RestGetJobStatsAction());
+            restHandlers.add(new RestPutJobAction());
+            restHandlers.add(new RestPostJobUpdateAction());
+            restHandlers.add(new RestDeleteJobAction());
+            restHandlers.add(new RestOpenJobAction());
+            restHandlers.add(new RestGetFiltersAction());
+            restHandlers.add(new RestPutFilterAction());
+            restHandlers.add(new RestUpdateFilterAction());
+            restHandlers.add(new RestDeleteFilterAction());
+            restHandlers.add(new RestGetInfluencersAction());
+            restHandlers.add(new RestGetRecordsAction());
+            restHandlers.add(new RestGetBucketsAction());
+            restHandlers.add(new RestGetOverallBucketsAction());
+            restHandlers.add(new RestPostDataAction());
+            restHandlers.add(new RestCloseJobAction());
+            restHandlers.add(new RestFlushJobAction());
+            restHandlers.add(new RestResetJobAction());
+            restHandlers.add(new RestValidateDetectorAction());
+            restHandlers.add(new RestValidateJobConfigAction());
+            restHandlers.add(new RestEstimateModelMemoryAction());
+            restHandlers.add(new RestGetCategoriesAction());
+            restHandlers.add(new RestGetModelSnapshotsAction());
+            restHandlers.add(new RestRevertModelSnapshotAction());
+            restHandlers.add(new RestUpdateModelSnapshotAction());
+            restHandlers.add(new RestGetDatafeedsAction());
+            restHandlers.add(new RestGetDatafeedStatsAction());
+            restHandlers.add(new RestPutDatafeedAction());
+            restHandlers.add(new RestUpdateDatafeedAction());
+            restHandlers.add(new RestDeleteDatafeedAction());
+            restHandlers.add(new RestPreviewDatafeedAction());
+            restHandlers.add(new RestStartDatafeedAction());
+            restHandlers.add(new RestStopDatafeedAction());
+            restHandlers.add(new RestDeleteModelSnapshotAction());
+            restHandlers.add(new RestForecastJobAction());
+            restHandlers.add(new RestDeleteForecastAction());
+            restHandlers.add(new RestGetCalendarsAction());
+            restHandlers.add(new RestPutCalendarAction());
+            restHandlers.add(new RestDeleteCalendarAction());
+            restHandlers.add(new RestDeleteCalendarEventAction());
+            restHandlers.add(new RestDeleteCalendarJobAction());
+            restHandlers.add(new RestPutCalendarJobAction());
+            restHandlers.add(new RestGetCalendarEventsAction());
+            restHandlers.add(new RestPostCalendarEventAction());
+            restHandlers.add(new RestUpgradeJobModelSnapshotAction());
+            restHandlers.add(new RestGetJobModelSnapshotsUpgradeStatsAction());
+            restHandlers.add(new RestDeleteExpiredDataAction());
+            restHandlers.add(new RestCatJobsAction());
+            restHandlers.add(new RestCatDatafeedsAction());
+        }
+        if (machineLearningExtension.get().isDataFrameAnalyticsEnabled() || machineLearningExtension.get().isNlpEnabled()) {
+            restHandlers.add(new RestGetTrainedModelsAction());
+            restHandlers.add(new RestDeleteTrainedModelAction());
+            restHandlers.add(new RestGetTrainedModelsStatsAction());
+            restHandlers.add(new RestPutTrainedModelAction());
+            restHandlers.add(new RestPutTrainedModelAliasAction());
+            restHandlers.add(new RestDeleteTrainedModelAliasAction());
+            restHandlers.add(new RestPutTrainedModelDefinitionPartAction());
+            restHandlers.add(new RestInferTrainedModelAction());
+            restHandlers.add(new RestCatTrainedModelsAction());
+            if (machineLearningExtension.get().isDataFrameAnalyticsEnabled()) {
+                restHandlers.add(new RestGetDataFrameAnalyticsAction());
+                restHandlers.add(new RestGetDataFrameAnalyticsStatsAction());
+                restHandlers.add(new RestPutDataFrameAnalyticsAction());
+                restHandlers.add(new RestPostDataFrameAnalyticsUpdateAction());
+                restHandlers.add(new RestDeleteDataFrameAnalyticsAction());
+                restHandlers.add(new RestStartDataFrameAnalyticsAction());
+                restHandlers.add(new RestStopDataFrameAnalyticsAction());
+                restHandlers.add(new RestEvaluateDataFrameAction());
+                restHandlers.add(new RestExplainDataFrameAnalyticsAction());
+                restHandlers.add(new RestPreviewDataFrameAnalyticsAction());
+                restHandlers.add(new RestCatDataFrameAnalyticsAction());
+            }
+            if (machineLearningExtension.get().isNlpEnabled()) {
+                restHandlers.add(new RestStartTrainedModelDeploymentAction());
+                restHandlers.add(new RestStopTrainedModelDeploymentAction());
+                restHandlers.add(new RestInferTrainedModelDeploymentAction());
+                restHandlers.add(new RestUpdateTrainedModelDeploymentAction());
+                restHandlers.add(new RestPutTrainedModelVocabularyAction());
+                restHandlers.add(new RestClearDeploymentCacheAction());
+            }
+        }
+        return restHandlers;
     }
 
     @Override
     public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
-        var usageAction = new ActionHandler<>(XPackUsageFeatureAction.MACHINE_LEARNING, MachineLearningUsageTransportAction.class);
-        var infoAction = new ActionHandler<>(XPackInfoFeatureAction.MACHINE_LEARNING, MachineLearningInfoTransportAction.class);
+        List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actionHandlers = new ArrayList<>();
+        actionHandlers.add(new ActionHandler<>(XPackUsageFeatureAction.MACHINE_LEARNING, MachineLearningUsageTransportAction.class));
+        actionHandlers.add(new ActionHandler<>(XPackInfoFeatureAction.MACHINE_LEARNING, MachineLearningInfoTransportAction.class));
         if (false == enabled) {
-            return List.of(usageAction, infoAction);
+            return actionHandlers;
         }
-        return List.of(
-            new ActionHandler<>(GetJobsAction.INSTANCE, TransportGetJobsAction.class),
-            new ActionHandler<>(GetJobsStatsAction.INSTANCE, TransportGetJobsStatsAction.class),
-            new ActionHandler<>(MlInfoAction.INSTANCE, TransportMlInfoAction.class),
-            new ActionHandler<>(MlMemoryAction.INSTANCE, TransportMlMemoryAction.class),
-            new ActionHandler<>(PutJobAction.INSTANCE, TransportPutJobAction.class),
-            new ActionHandler<>(UpdateJobAction.INSTANCE, TransportUpdateJobAction.class),
-            new ActionHandler<>(DeleteJobAction.INSTANCE, TransportDeleteJobAction.class),
-            new ActionHandler<>(OpenJobAction.INSTANCE, TransportOpenJobAction.class),
-            new ActionHandler<>(GetFiltersAction.INSTANCE, TransportGetFiltersAction.class),
-            new ActionHandler<>(PutFilterAction.INSTANCE, TransportPutFilterAction.class),
-            new ActionHandler<>(UpdateFilterAction.INSTANCE, TransportUpdateFilterAction.class),
-            new ActionHandler<>(DeleteFilterAction.INSTANCE, TransportDeleteFilterAction.class),
-            new ActionHandler<>(KillProcessAction.INSTANCE, TransportKillProcessAction.class),
-            new ActionHandler<>(GetBucketsAction.INSTANCE, TransportGetBucketsAction.class),
-            new ActionHandler<>(GetInfluencersAction.INSTANCE, TransportGetInfluencersAction.class),
-            new ActionHandler<>(GetOverallBucketsAction.INSTANCE, TransportGetOverallBucketsAction.class),
-            new ActionHandler<>(GetRecordsAction.INSTANCE, TransportGetRecordsAction.class),
-            new ActionHandler<>(PostDataAction.INSTANCE, TransportPostDataAction.class),
-            new ActionHandler<>(CloseJobAction.INSTANCE, TransportCloseJobAction.class),
-            new ActionHandler<>(FinalizeJobExecutionAction.INSTANCE, TransportFinalizeJobExecutionAction.class),
-            new ActionHandler<>(FlushJobAction.INSTANCE, TransportFlushJobAction.class),
-            new ActionHandler<>(ResetJobAction.INSTANCE, TransportResetJobAction.class),
-            new ActionHandler<>(ValidateDetectorAction.INSTANCE, TransportValidateDetectorAction.class),
-            new ActionHandler<>(ValidateJobConfigAction.INSTANCE, TransportValidateJobConfigAction.class),
-            new ActionHandler<>(EstimateModelMemoryAction.INSTANCE, TransportEstimateModelMemoryAction.class),
-            new ActionHandler<>(GetCategoriesAction.INSTANCE, TransportGetCategoriesAction.class),
-            new ActionHandler<>(GetModelSnapshotsAction.INSTANCE, TransportGetModelSnapshotsAction.class),
-            new ActionHandler<>(RevertModelSnapshotAction.INSTANCE, TransportRevertModelSnapshotAction.class),
-            new ActionHandler<>(UpdateModelSnapshotAction.INSTANCE, TransportUpdateModelSnapshotAction.class),
-            new ActionHandler<>(GetDatafeedsAction.INSTANCE, TransportGetDatafeedsAction.class),
-            new ActionHandler<>(GetDatafeedsStatsAction.INSTANCE, TransportGetDatafeedsStatsAction.class),
-            new ActionHandler<>(PutDatafeedAction.INSTANCE, TransportPutDatafeedAction.class),
-            new ActionHandler<>(UpdateDatafeedAction.INSTANCE, TransportUpdateDatafeedAction.class),
-            new ActionHandler<>(DeleteDatafeedAction.INSTANCE, TransportDeleteDatafeedAction.class),
-            new ActionHandler<>(PreviewDatafeedAction.INSTANCE, TransportPreviewDatafeedAction.class),
-            new ActionHandler<>(StartDatafeedAction.INSTANCE, TransportStartDatafeedAction.class),
-            new ActionHandler<>(StopDatafeedAction.INSTANCE, TransportStopDatafeedAction.class),
-            new ActionHandler<>(IsolateDatafeedAction.INSTANCE, TransportIsolateDatafeedAction.class),
-            new ActionHandler<>(DeleteModelSnapshotAction.INSTANCE, TransportDeleteModelSnapshotAction.class),
-            new ActionHandler<>(UpdateProcessAction.INSTANCE, TransportUpdateProcessAction.class),
-            new ActionHandler<>(DeleteExpiredDataAction.INSTANCE, TransportDeleteExpiredDataAction.class),
-            new ActionHandler<>(ForecastJobAction.INSTANCE, TransportForecastJobAction.class),
-            new ActionHandler<>(DeleteForecastAction.INSTANCE, TransportDeleteForecastAction.class),
-            new ActionHandler<>(GetCalendarsAction.INSTANCE, TransportGetCalendarsAction.class),
-            new ActionHandler<>(PutCalendarAction.INSTANCE, TransportPutCalendarAction.class),
-            new ActionHandler<>(DeleteCalendarAction.INSTANCE, TransportDeleteCalendarAction.class),
-            new ActionHandler<>(DeleteCalendarEventAction.INSTANCE, TransportDeleteCalendarEventAction.class),
-            new ActionHandler<>(UpdateCalendarJobAction.INSTANCE, TransportUpdateCalendarJobAction.class),
-            new ActionHandler<>(GetCalendarEventsAction.INSTANCE, TransportGetCalendarEventsAction.class),
-            new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class),
-            new ActionHandler<>(PersistJobAction.INSTANCE, TransportPersistJobAction.class),
-            new ActionHandler<>(SetUpgradeModeAction.INSTANCE, TransportSetUpgradeModeAction.class),
-            new ActionHandler<>(GetDataFrameAnalyticsAction.INSTANCE, TransportGetDataFrameAnalyticsAction.class),
-            new ActionHandler<>(GetDataFrameAnalyticsStatsAction.INSTANCE, TransportGetDataFrameAnalyticsStatsAction.class),
-            new ActionHandler<>(PutDataFrameAnalyticsAction.INSTANCE, TransportPutDataFrameAnalyticsAction.class),
-            new ActionHandler<>(UpdateDataFrameAnalyticsAction.INSTANCE, TransportUpdateDataFrameAnalyticsAction.class),
-            new ActionHandler<>(DeleteDataFrameAnalyticsAction.INSTANCE, TransportDeleteDataFrameAnalyticsAction.class),
-            new ActionHandler<>(StartDataFrameAnalyticsAction.INSTANCE, TransportStartDataFrameAnalyticsAction.class),
-            new ActionHandler<>(StopDataFrameAnalyticsAction.INSTANCE, TransportStopDataFrameAnalyticsAction.class),
-            new ActionHandler<>(EvaluateDataFrameAction.INSTANCE, TransportEvaluateDataFrameAction.class),
-            new ActionHandler<>(ExplainDataFrameAnalyticsAction.INSTANCE, TransportExplainDataFrameAnalyticsAction.class),
-            new ActionHandler<>(InferModelAction.INSTANCE, TransportInternalInferModelAction.class),
-            new ActionHandler<>(InferModelAction.EXTERNAL_INSTANCE, TransportExternalInferModelAction.class),
-            new ActionHandler<>(TrainedModelCacheInfoAction.INSTANCE, TransportTrainedModelCacheInfoAction.class),
-            new ActionHandler<>(GetTrainedModelsAction.INSTANCE, TransportGetTrainedModelsAction.class),
-            new ActionHandler<>(DeleteTrainedModelAction.INSTANCE, TransportDeleteTrainedModelAction.class),
-            new ActionHandler<>(GetTrainedModelsStatsAction.INSTANCE, TransportGetTrainedModelsStatsAction.class),
-            new ActionHandler<>(PutTrainedModelAction.INSTANCE, TransportPutTrainedModelAction.class),
-            new ActionHandler<>(UpgradeJobModelSnapshotAction.INSTANCE, TransportUpgradeJobModelSnapshotAction.class),
-            new ActionHandler<>(CancelJobModelSnapshotUpgradeAction.INSTANCE, TransportCancelJobModelSnapshotUpgradeAction.class),
-            new ActionHandler<>(GetJobModelSnapshotsUpgradeStatsAction.INSTANCE, TransportGetJobModelSnapshotsUpgradeStatsAction.class),
-            new ActionHandler<>(PutTrainedModelAliasAction.INSTANCE, TransportPutTrainedModelAliasAction.class),
-            new ActionHandler<>(DeleteTrainedModelAliasAction.INSTANCE, TransportDeleteTrainedModelAliasAction.class),
-            new ActionHandler<>(PreviewDataFrameAnalyticsAction.INSTANCE, TransportPreviewDataFrameAnalyticsAction.class),
-            new ActionHandler<>(SetResetModeAction.INSTANCE, TransportSetResetModeAction.class),
-            new ActionHandler<>(StartTrainedModelDeploymentAction.INSTANCE, TransportStartTrainedModelDeploymentAction.class),
-            new ActionHandler<>(StopTrainedModelDeploymentAction.INSTANCE, TransportStopTrainedModelDeploymentAction.class),
-            new ActionHandler<>(InferTrainedModelDeploymentAction.INSTANCE, TransportInferTrainedModelDeploymentAction.class),
-            new ActionHandler<>(UpdateTrainedModelDeploymentAction.INSTANCE, TransportUpdateTrainedModelDeploymentAction.class),
-            new ActionHandler<>(GetDeploymentStatsAction.INSTANCE, TransportGetDeploymentStatsAction.class),
-            new ActionHandler<>(GetDatafeedRunningStateAction.INSTANCE, TransportGetDatafeedRunningStateAction.class),
-            new ActionHandler<>(CreateTrainedModelAssignmentAction.INSTANCE, TransportCreateTrainedModelAssignmentAction.class),
-            new ActionHandler<>(DeleteTrainedModelAssignmentAction.INSTANCE, TransportDeleteTrainedModelAssignmentAction.class),
-            new ActionHandler<>(PutTrainedModelDefinitionPartAction.INSTANCE, TransportPutTrainedModelDefinitionPartAction.class),
-            new ActionHandler<>(PutTrainedModelVocabularyAction.INSTANCE, TransportPutTrainedModelVocabularyAction.class),
-            new ActionHandler<>(
-                UpdateTrainedModelAssignmentRoutingInfoAction.INSTANCE,
-                TransportUpdateTrainedModelAssignmentStateAction.class
-            ),
-            new ActionHandler<>(ClearDeploymentCacheAction.INSTANCE, TransportClearDeploymentCacheAction.class),
-            usageAction,
-            infoAction
-        );
+        actionHandlers.add(new ActionHandler<>(MlInfoAction.INSTANCE, TransportMlInfoAction.class));
+        actionHandlers.add(new ActionHandler<>(MlMemoryAction.INSTANCE, TransportMlMemoryAction.class));
+        actionHandlers.add(new ActionHandler<>(SetUpgradeModeAction.INSTANCE, TransportSetUpgradeModeAction.class));
+        actionHandlers.add(new ActionHandler<>(SetResetModeAction.INSTANCE, TransportSetResetModeAction.class));
+        if (machineLearningExtension.get().isAnomalyDetectionEnabled()) {
+            actionHandlers.add(new ActionHandler<>(GetJobsAction.INSTANCE, TransportGetJobsAction.class));
+            actionHandlers.add(new ActionHandler<>(GetJobsStatsAction.INSTANCE, TransportGetJobsStatsAction.class));
+            actionHandlers.add(new ActionHandler<>(PutJobAction.INSTANCE, TransportPutJobAction.class));
+            actionHandlers.add(new ActionHandler<>(UpdateJobAction.INSTANCE, TransportUpdateJobAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteJobAction.INSTANCE, TransportDeleteJobAction.class));
+            actionHandlers.add(new ActionHandler<>(OpenJobAction.INSTANCE, TransportOpenJobAction.class));
+            actionHandlers.add(new ActionHandler<>(GetFiltersAction.INSTANCE, TransportGetFiltersAction.class));
+            actionHandlers.add(new ActionHandler<>(PutFilterAction.INSTANCE, TransportPutFilterAction.class));
+            actionHandlers.add(new ActionHandler<>(UpdateFilterAction.INSTANCE, TransportUpdateFilterAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteFilterAction.INSTANCE, TransportDeleteFilterAction.class));
+            actionHandlers.add(new ActionHandler<>(KillProcessAction.INSTANCE, TransportKillProcessAction.class));
+            actionHandlers.add(new ActionHandler<>(GetBucketsAction.INSTANCE, TransportGetBucketsAction.class));
+            actionHandlers.add(new ActionHandler<>(GetInfluencersAction.INSTANCE, TransportGetInfluencersAction.class));
+            actionHandlers.add(new ActionHandler<>(GetOverallBucketsAction.INSTANCE, TransportGetOverallBucketsAction.class));
+            actionHandlers.add(new ActionHandler<>(GetRecordsAction.INSTANCE, TransportGetRecordsAction.class));
+            actionHandlers.add(new ActionHandler<>(PostDataAction.INSTANCE, TransportPostDataAction.class));
+            actionHandlers.add(new ActionHandler<>(CloseJobAction.INSTANCE, TransportCloseJobAction.class));
+            actionHandlers.add(new ActionHandler<>(FinalizeJobExecutionAction.INSTANCE, TransportFinalizeJobExecutionAction.class));
+            actionHandlers.add(new ActionHandler<>(FlushJobAction.INSTANCE, TransportFlushJobAction.class));
+            actionHandlers.add(new ActionHandler<>(ResetJobAction.INSTANCE, TransportResetJobAction.class));
+            actionHandlers.add(new ActionHandler<>(ValidateDetectorAction.INSTANCE, TransportValidateDetectorAction.class));
+            actionHandlers.add(new ActionHandler<>(ValidateJobConfigAction.INSTANCE, TransportValidateJobConfigAction.class));
+            actionHandlers.add(new ActionHandler<>(EstimateModelMemoryAction.INSTANCE, TransportEstimateModelMemoryAction.class));
+            actionHandlers.add(new ActionHandler<>(GetCategoriesAction.INSTANCE, TransportGetCategoriesAction.class));
+            actionHandlers.add(new ActionHandler<>(GetModelSnapshotsAction.INSTANCE, TransportGetModelSnapshotsAction.class));
+            actionHandlers.add(new ActionHandler<>(RevertModelSnapshotAction.INSTANCE, TransportRevertModelSnapshotAction.class));
+            actionHandlers.add(new ActionHandler<>(UpdateModelSnapshotAction.INSTANCE, TransportUpdateModelSnapshotAction.class));
+            actionHandlers.add(new ActionHandler<>(GetDatafeedsAction.INSTANCE, TransportGetDatafeedsAction.class));
+            actionHandlers.add(new ActionHandler<>(GetDatafeedsStatsAction.INSTANCE, TransportGetDatafeedsStatsAction.class));
+            actionHandlers.add(new ActionHandler<>(PutDatafeedAction.INSTANCE, TransportPutDatafeedAction.class));
+            actionHandlers.add(new ActionHandler<>(UpdateDatafeedAction.INSTANCE, TransportUpdateDatafeedAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteDatafeedAction.INSTANCE, TransportDeleteDatafeedAction.class));
+            actionHandlers.add(new ActionHandler<>(PreviewDatafeedAction.INSTANCE, TransportPreviewDatafeedAction.class));
+            actionHandlers.add(new ActionHandler<>(StartDatafeedAction.INSTANCE, TransportStartDatafeedAction.class));
+            actionHandlers.add(new ActionHandler<>(StopDatafeedAction.INSTANCE, TransportStopDatafeedAction.class));
+            actionHandlers.add(new ActionHandler<>(IsolateDatafeedAction.INSTANCE, TransportIsolateDatafeedAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteModelSnapshotAction.INSTANCE, TransportDeleteModelSnapshotAction.class));
+            actionHandlers.add(new ActionHandler<>(UpdateProcessAction.INSTANCE, TransportUpdateProcessAction.class));
+            actionHandlers.add(new ActionHandler<>(ForecastJobAction.INSTANCE, TransportForecastJobAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteForecastAction.INSTANCE, TransportDeleteForecastAction.class));
+            actionHandlers.add(new ActionHandler<>(GetCalendarsAction.INSTANCE, TransportGetCalendarsAction.class));
+            actionHandlers.add(new ActionHandler<>(PutCalendarAction.INSTANCE, TransportPutCalendarAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteCalendarAction.INSTANCE, TransportDeleteCalendarAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteCalendarEventAction.INSTANCE, TransportDeleteCalendarEventAction.class));
+            actionHandlers.add(new ActionHandler<>(UpdateCalendarJobAction.INSTANCE, TransportUpdateCalendarJobAction.class));
+            actionHandlers.add(new ActionHandler<>(GetCalendarEventsAction.INSTANCE, TransportGetCalendarEventsAction.class));
+            actionHandlers.add(new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class));
+            actionHandlers.add(new ActionHandler<>(PersistJobAction.INSTANCE, TransportPersistJobAction.class));
+            actionHandlers.add(new ActionHandler<>(UpgradeJobModelSnapshotAction.INSTANCE, TransportUpgradeJobModelSnapshotAction.class));
+            actionHandlers.add(
+                new ActionHandler<>(CancelJobModelSnapshotUpgradeAction.INSTANCE, TransportCancelJobModelSnapshotUpgradeAction.class)
+            );
+            actionHandlers.add(
+                new ActionHandler<>(GetJobModelSnapshotsUpgradeStatsAction.INSTANCE, TransportGetJobModelSnapshotsUpgradeStatsAction.class)
+            );
+            actionHandlers.add(new ActionHandler<>(GetDatafeedRunningStateAction.INSTANCE, TransportGetDatafeedRunningStateAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteExpiredDataAction.INSTANCE, TransportDeleteExpiredDataAction.class));
+        }
+        if (machineLearningExtension.get().isDataFrameAnalyticsEnabled() || machineLearningExtension.get().isNlpEnabled()) {
+            actionHandlers.add(new ActionHandler<>(GetTrainedModelsAction.INSTANCE, TransportGetTrainedModelsAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteTrainedModelAction.INSTANCE, TransportDeleteTrainedModelAction.class));
+            actionHandlers.add(new ActionHandler<>(GetTrainedModelsStatsAction.INSTANCE, TransportGetTrainedModelsStatsAction.class));
+            actionHandlers.add(new ActionHandler<>(PutTrainedModelAction.INSTANCE, TransportPutTrainedModelAction.class));
+            actionHandlers.add(new ActionHandler<>(PutTrainedModelAliasAction.INSTANCE, TransportPutTrainedModelAliasAction.class));
+            actionHandlers.add(new ActionHandler<>(DeleteTrainedModelAliasAction.INSTANCE, TransportDeleteTrainedModelAliasAction.class));
+            actionHandlers.add(
+                new ActionHandler<>(PutTrainedModelDefinitionPartAction.INSTANCE, TransportPutTrainedModelDefinitionPartAction.class)
+            );
+            actionHandlers.add(new ActionHandler<>(InferModelAction.INSTANCE, TransportInternalInferModelAction.class));
+            actionHandlers.add(new ActionHandler<>(InferModelAction.EXTERNAL_INSTANCE, TransportExternalInferModelAction.class));
+            if (machineLearningExtension.get().isDataFrameAnalyticsEnabled()) {
+                actionHandlers.add(new ActionHandler<>(GetDataFrameAnalyticsAction.INSTANCE, TransportGetDataFrameAnalyticsAction.class));
+                actionHandlers.add(
+                    new ActionHandler<>(GetDataFrameAnalyticsStatsAction.INSTANCE, TransportGetDataFrameAnalyticsStatsAction.class)
+                );
+                actionHandlers.add(new ActionHandler<>(PutDataFrameAnalyticsAction.INSTANCE, TransportPutDataFrameAnalyticsAction.class));
+                actionHandlers.add(
+                    new ActionHandler<>(UpdateDataFrameAnalyticsAction.INSTANCE, TransportUpdateDataFrameAnalyticsAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(DeleteDataFrameAnalyticsAction.INSTANCE, TransportDeleteDataFrameAnalyticsAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(StartDataFrameAnalyticsAction.INSTANCE, TransportStartDataFrameAnalyticsAction.class)
+                );
+                actionHandlers.add(new ActionHandler<>(StopDataFrameAnalyticsAction.INSTANCE, TransportStopDataFrameAnalyticsAction.class));
+                actionHandlers.add(new ActionHandler<>(EvaluateDataFrameAction.INSTANCE, TransportEvaluateDataFrameAction.class));
+                actionHandlers.add(
+                    new ActionHandler<>(ExplainDataFrameAnalyticsAction.INSTANCE, TransportExplainDataFrameAnalyticsAction.class)
+                );
+                actionHandlers.add(new ActionHandler<>(TrainedModelCacheInfoAction.INSTANCE, TransportTrainedModelCacheInfoAction.class));
+                actionHandlers.add(
+                    new ActionHandler<>(PreviewDataFrameAnalyticsAction.INSTANCE, TransportPreviewDataFrameAnalyticsAction.class)
+                );
+            }
+            if (machineLearningExtension.get().isNlpEnabled()) {
+                actionHandlers.add(
+                    new ActionHandler<>(StartTrainedModelDeploymentAction.INSTANCE, TransportStartTrainedModelDeploymentAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(StopTrainedModelDeploymentAction.INSTANCE, TransportStopTrainedModelDeploymentAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(InferTrainedModelDeploymentAction.INSTANCE, TransportInferTrainedModelDeploymentAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(UpdateTrainedModelDeploymentAction.INSTANCE, TransportUpdateTrainedModelDeploymentAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(PutTrainedModelVocabularyAction.INSTANCE, TransportPutTrainedModelVocabularyAction.class)
+                );
+                actionHandlers.add(new ActionHandler<>(ClearDeploymentCacheAction.INSTANCE, TransportClearDeploymentCacheAction.class));
+                actionHandlers.add(new ActionHandler<>(GetDeploymentStatsAction.INSTANCE, TransportGetDeploymentStatsAction.class));
+                actionHandlers.add(
+                    new ActionHandler<>(CreateTrainedModelAssignmentAction.INSTANCE, TransportCreateTrainedModelAssignmentAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(DeleteTrainedModelAssignmentAction.INSTANCE, TransportDeleteTrainedModelAssignmentAction.class)
+                );
+                actionHandlers.add(
+                    new ActionHandler<>(
+                        UpdateTrainedModelAssignmentRoutingInfoAction.INSTANCE,
+                        TransportUpdateTrainedModelAssignmentStateAction.class
+                    )
+                );
+            }
+        }
+        return actionHandlers;
     }
 
     @Override
     public List<ActionFilter> getActionFilters() {
         if (enabled == false) {
-            return emptyList();
+            return List.of();
         }
 
-        return singletonList(this.mlUpgradeModeActionFilter.get());
+        return List.of(this.mlUpgradeModeActionFilter.get());
     }
 
     @Override
     public List<ExecutorBuilder<?>> getExecutorBuilders(Settings unused) {
         if (false == enabled) {
-            return emptyList();
+            return List.of();
         }
 
         // These thread pools scale such that they can accommodate the maximum number of jobs per node

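Two things happen in this file: the pre-Java-9 `Arrays.asList`/`singletonList`/`emptyList` calls are migrated to immutable `List.of(...)`, and the flat handler lists are rebuilt as feature-gated `ArrayList`s. A hypothetical helper that captures the gating pattern in one place (not part of the commit):

    // Adds a group of handlers only when its feature flag is set.
    static <T> void addIfEnabled(boolean featureEnabled, List<T> target, List<T> group) {
        if (featureEnabled) {
            target.addAll(group);
        }
    }

In the commit itself the conditionals are inlined, which keeps each handler's feature gate visible at its registration site.
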
+ 6 - 0
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java

@@ -12,4 +12,10 @@ public interface MachineLearningExtension {
     boolean useIlm();
 
     boolean includeNodeInfo();
+
+    boolean isAnomalyDetectionEnabled();
+
+    boolean isDataFrameAnalyticsEnabled();
+
+    boolean isNlpEnabled();
 }

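For illustration, a hypothetical extension that ships only the NLP surface; `useIlm` and `includeNodeInfo` are stubbed to match the defaults shown above:

    public class NlpOnlyMachineLearningExtension implements MachineLearningExtension {
        @Override public boolean useIlm() { return true; }
        @Override public boolean includeNodeInfo() { return true; }
        @Override public boolean isAnomalyDetectionEnabled() { return false; }
        @Override public boolean isDataFrameAnalyticsEnabled() { return false; }
        @Override public boolean isNlpEnabled() { return true; }
    }

With this extension in place, `getRestHandlers` and `getActions` above would register only the shared and NLP handler groups.
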
+ 75 - 33
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java

@@ -52,7 +52,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
  */
 public class MlDailyMaintenanceService implements Releasable {
 
-    private static final Logger LOGGER = LogManager.getLogger(MlDailyMaintenanceService.class);
+    private static final Logger logger = LogManager.getLogger(MlDailyMaintenanceService.class);
 
     private static final int MAX_TIME_OFFSET_MINUTES = 120;
 
@@ -67,6 +67,10 @@ public class MlDailyMaintenanceService implements Releasable {
      */
     private final Supplier<TimeValue> schedulerProvider;
 
+    private final boolean isAnomalyDetectionEnabled;
+    private final boolean isDataFrameAnalyticsEnabled;
+    private final boolean isNlpEnabled;
+
     private volatile Scheduler.Cancellable cancellable;
     private volatile float deleteExpiredDataRequestsPerSecond;
 
@@ -76,7 +80,10 @@ public class MlDailyMaintenanceService implements Releasable {
         Client client,
         ClusterService clusterService,
         MlAssignmentNotifier mlAssignmentNotifier,
-        Supplier<TimeValue> scheduleProvider
+        Supplier<TimeValue> scheduleProvider,
+        boolean isAnomalyDetectionEnabled,
+        boolean isDataFrameAnalyticsEnabled,
+        boolean isNlpEnabled
     ) {
         this.threadPool = Objects.requireNonNull(threadPool);
         this.client = Objects.requireNonNull(client);
@@ -84,6 +91,9 @@ public class MlDailyMaintenanceService implements Releasable {
         this.mlAssignmentNotifier = Objects.requireNonNull(mlAssignmentNotifier);
         this.schedulerProvider = Objects.requireNonNull(scheduleProvider);
         this.deleteExpiredDataRequestsPerSecond = MachineLearning.NIGHTLY_MAINTENANCE_REQUESTS_PER_SECOND.get(settings);
+        this.isAnomalyDetectionEnabled = isAnomalyDetectionEnabled;
+        this.isDataFrameAnalyticsEnabled = isDataFrameAnalyticsEnabled;
+        this.isNlpEnabled = isNlpEnabled;
     }
 
     public MlDailyMaintenanceService(
@@ -92,9 +102,22 @@ public class MlDailyMaintenanceService implements Releasable {
         ThreadPool threadPool,
         Client client,
         ClusterService clusterService,
-        MlAssignmentNotifier mlAssignmentNotifier
+        MlAssignmentNotifier mlAssignmentNotifier,
+        boolean isAnomalyDetectionEnabled,
+        boolean isDataFrameAnalyticsEnabled,
+        boolean isNlpEnabled
     ) {
-        this(settings, threadPool, client, clusterService, mlAssignmentNotifier, () -> delayToNextTime(clusterName));
+        this(
+            settings,
+            threadPool,
+            client,
+            clusterService,
+            mlAssignmentNotifier,
+            () -> delayToNextTime(clusterName),
+            isAnomalyDetectionEnabled,
+            isDataFrameAnalyticsEnabled,
+            isNlpEnabled
+        );
     }
 
     void setDeleteExpiredDataRequestsPerSecond(float value) {
@@ -121,12 +144,12 @@ public class MlDailyMaintenanceService implements Releasable {
     }
 
     public synchronized void start() {
-        LOGGER.debug("Starting ML daily maintenance service");
+        logger.debug("Starting ML daily maintenance service");
         scheduleNext();
     }
 
     public synchronized void stop() {
-        LOGGER.debug("Stopping ML daily maintenance service");
+        logger.debug("Stopping ML daily maintenance service");
         if (cancellable != null && cancellable.isCancelled() == false) {
             cancellable.cancel();
         }
@@ -146,7 +169,7 @@ public class MlDailyMaintenanceService implements Releasable {
             cancellable = threadPool.schedule(this::triggerTasks, schedulerProvider.get(), ThreadPool.Names.GENERIC);
         } catch (EsRejectedExecutionException e) {
             if (e.isExecutorShutdown()) {
-                LOGGER.debug("failed to schedule next maintenance task; shutting down", e);
+                logger.debug("failed to schedule next maintenance task; shutting down", e);
             } else {
                 throw e;
             }
@@ -156,47 +179,66 @@ public class MlDailyMaintenanceService implements Releasable {
     private void triggerTasks() {
         try {
             if (MlMetadata.getMlMetadata(clusterService.state()).isUpgradeMode()) {
-                LOGGER.warn("skipping scheduled [ML] maintenance tasks because upgrade mode is enabled");
+                logger.warn("skipping scheduled [ML] maintenance tasks because upgrade mode is enabled");
                 return;
             }
             if (MlMetadata.getMlMetadata(clusterService.state()).isResetMode()) {
-                LOGGER.warn("skipping scheduled [ML] maintenance tasks because machine learning feature reset is in progress");
+                logger.warn("skipping scheduled [ML] maintenance tasks because machine learning feature reset is in progress");
                 return;
             }
-            LOGGER.info("triggering scheduled [ML] maintenance tasks");
-
-            // Step 3: Log any error that could have happened
-            ActionListener<AcknowledgedResponse> finalListener = ActionListener.wrap(
-                unused -> {},
-                e -> LOGGER.error("An error occurred during [ML] maintenance tasks execution", e)
-            );
-
-            // Step 2: Delete expired data
-            ActionListener<AcknowledgedResponse> deleteJobsListener = ActionListener.wrap(
-                unused -> triggerDeleteExpiredDataTask(finalListener),
-                e -> {
-                    LOGGER.info("[ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask failed", e);
-                    // Note: Steps 1 and 2 are independent of each other and step 2 is executed even if step 1 failed.
-                    triggerDeleteExpiredDataTask(finalListener);
-                }
-            );
-
-            // Step 1: Delete jobs that are in deleting state
-            triggerDeleteJobsInStateDeletingWithoutDeletionTask(deleteJobsListener);
+            logger.info("triggering scheduled [ML] maintenance tasks");
 
+            if (isAnomalyDetectionEnabled) {
+                triggerAnomalyDetectionMaintenance();
+            }
+            if (isDataFrameAnalyticsEnabled) {
+                triggerDataFrameAnalyticsMaintenance();
+            }
+            if (isNlpEnabled) {
+                triggerNlpMaintenance();
+            }
             auditUnassignedMlTasks();
         } finally {
             scheduleNext();
         }
     }
 
+    private void triggerAnomalyDetectionMaintenance() {
+        // Step 3: Log any error that could have happened
+        ActionListener<AcknowledgedResponse> finalListener = ActionListener.wrap(
+            unused -> {},
+            e -> logger.error("An error occurred during [ML] maintenance tasks execution", e)
+        );
+
+        // Step 2: Delete expired data
+        ActionListener<AcknowledgedResponse> deleteJobsListener = ActionListener.wrap(
+            unused -> triggerDeleteExpiredDataTask(finalListener),
+            e -> {
+                logger.info("[ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask failed", e);
+                // Note: Steps 1 and 2 are independent of each other and step 2 is executed even if step 1 failed.
+                triggerDeleteExpiredDataTask(finalListener);
+            }
+        );
+
+        // Step 1: Delete jobs that are in deleting state
+        triggerDeleteJobsInStateDeletingWithoutDeletionTask(deleteJobsListener);
+    }
+
+    private void triggerDataFrameAnalyticsMaintenance() {
+        // Currently a NOOP
+    }
+
+    private void triggerNlpMaintenance() {
+        // Currently a NOOP
+    }
+
     private void triggerDeleteExpiredDataTask(ActionListener<AcknowledgedResponse> finalListener) {
         ActionListener<DeleteExpiredDataAction.Response> deleteExpiredDataActionListener = ActionListener.wrap(
             deleteExpiredDataResponse -> {
                 if (deleteExpiredDataResponse.isDeleted()) {
-                    LOGGER.info("Successfully completed [ML] maintenance task: triggerDeleteExpiredDataTask");
+                    logger.info("Successfully completed [ML] maintenance task: triggerDeleteExpiredDataTask");
                 } else {
-                    LOGGER.info("Halting [ML] maintenance tasks before completion as elapsed time is too great");
+                    logger.info("Halting [ML] maintenance tasks before completion as elapsed time is too great");
                 }
                 finalListener.onResponse(AcknowledgedResponse.TRUE);
             },
@@ -224,9 +266,9 @@ public class MlDailyMaintenanceService implements Releasable {
                     .map(DeleteJobAction.Request::getJobId)
                     .collect(toList());
                 if (jobIds.isEmpty()) {
-                    LOGGER.info("Successfully completed [ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask");
+                    logger.info("Successfully completed [ML] maintenance task: triggerDeleteJobsInStateDeletingWithoutDeletionTask");
                 } else {
-                    LOGGER.info("The following ML jobs could not be deleted: [" + String.join(",", jobIds) + "]");
+                    logger.info("The following ML jobs could not be deleted: [" + String.join(",", jobIds) + "]");
                 }
                 finalListener.onResponse(AcknowledgedResponse.TRUE);
             },

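The extracted `triggerAnomalyDetectionMaintenance` keeps the original chaining idiom: listeners are declared in reverse order (step 3 first) because step 1 needs a handle on step 2's listener, and step 2 runs whether step 1 succeeds or fails. A minimal sketch of that control flow, reduced to plain Runnables:

    public class MaintenanceChainSketch {
        public static void main(String[] args) {
            Runnable step3 = () -> System.out.println("log any error from the chain");
            Runnable step2 = () -> {
                System.out.println("delete expired data");
                step3.run();
            };
            // Step 1 hands off to step 2 on success *and* on failure.
            Runnable step1 = () -> {
                try {
                    System.out.println("delete jobs stuck in the deleting state");
                } finally {
                    step2.run();
                }
            };
            step1.run();
        }
    }
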
+ 8 - 2
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java

@@ -62,7 +62,10 @@ public class MlInitializationService implements ClusterStateListener {
         ThreadPool threadPool,
         ClusterService clusterService,
         Client client,
-        MlAssignmentNotifier mlAssignmentNotifier
+        MlAssignmentNotifier mlAssignmentNotifier,
+        boolean isAnomalyDetectionEnabled,
+        boolean isDataFrameAnalyticsEnabled,
+        boolean isNlpEnabled
     ) {
         this(
             client,
@@ -73,7 +76,10 @@ public class MlInitializationService implements ClusterStateListener {
                 threadPool,
                 client,
                 clusterService,
-                mlAssignmentNotifier
+                mlAssignmentNotifier,
+                isAnomalyDetectionEnabled,
+                isDataFrameAnalyticsEnabled,
+                isNlpEnabled
             ),
             clusterService
         );

+ 1 - 1
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java

@@ -134,7 +134,7 @@ public class TransportDeleteTrainedModelAction extends AcknowledgedTransportMast
             }
         }
 
-        if (TrainedModelAssignmentMetadata.fromState(state).isAssigned(request.getId())) {
+        if (TrainedModelAssignmentMetadata.fromState(state).modelIsDeployed(request.getId())) {
             if (request.isForce()) {
                 forceStopDeployment(
                     request.getId(),

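The rename suggests a semantic shift: with deployment ids decoupled from model ids, the question is no longer "is there an assignment with this id" but "does any deployment currently use this model". A hypothetical shape for `modelIsDeployed` (inferred, not the actual method body; `allAssignments()` is an assumed accessor):

    public boolean modelIsDeployed(String modelId) {
        // True when any current assignment serves this model id,
        // whatever deployment id it runs under.
        return allAssignments().values()
            .stream()
            .anyMatch(assignment -> modelId.equals(assignment.getModelId()));
    }
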
+ 1 - 0
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java

@@ -144,6 +144,7 @@ public class TransportGetTrainedModelsAction extends HandledTransportAction<Requ
             new HashSet<>(request.getTags()),
             ModelAliasMetadata.fromState(clusterService.state()),
             parentTaskId,
+            Collections.emptySet(),
             idExpansionListener
         );
     }

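The next file also moves from nested `ActionListener.wrap` callbacks to `StepListener`, which separates declaring a step's continuation from triggering it. A minimal sketch of the idiom, using only the `whenComplete`/`onResponse` calls visible in the hunks below:

    import org.elasticsearch.action.StepListener;

    StepListener<Long> countListener = new StepListener<>();
    StepListener<String> reportListener = new StepListener<>();

    // Continuations are declared up front, last step first...
    reportListener.whenComplete(report -> System.out.println(report), e -> {});
    countListener.whenComplete(count -> reportListener.onResponse("count=" + count), e -> {});

    // ...then completing the first listener drives the chain.
    countListener.onResponse(42L);
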
+ 147 - 37
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java

@@ -6,7 +6,10 @@
  */
 package org.elasticsearch.xpack.ml.action;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.StepListener;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
@@ -18,6 +21,7 @@ import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.metrics.CounterMetric;
@@ -32,6 +36,7 @@ import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher;
 import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction;
 import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction;
 import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction;
@@ -43,12 +48,15 @@ import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConst
 import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceStats;
 import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TrainedModelSizeStats;
 import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata;
+import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata;
 import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc;
 import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -59,12 +67,14 @@ import java.util.stream.Stream;
 
 import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
-import static org.elasticsearch.xpack.ml.utils.InferenceProcessorInfoExtractor.pipelineIdsByModelIdsOrAliases;
+import static org.elasticsearch.xpack.ml.utils.InferenceProcessorInfoExtractor.pipelineIdsByResource;
 
 public class TransportGetTrainedModelsStatsAction extends HandledTransportAction<
     GetTrainedModelsStatsAction.Request,
     GetTrainedModelsStatsAction.Response> {
 
+    private static final Logger logger = LogManager.getLogger(TransportGetTrainedModelsStatsAction.class);
+
     private final Client client;
     private final ClusterService clusterService;
     private final TrainedModelProvider trainedModelProvider;
@@ -90,81 +100,175 @@ public class TransportGetTrainedModelsStatsAction extends HandledTransportAction
         ActionListener<GetTrainedModelsStatsAction.Response> listener
     ) {
         final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId());
-        final ModelAliasMetadata currentMetadata = ModelAliasMetadata.fromState(clusterService.state());
+        final ModelAliasMetadata modelAliasMetadata = ModelAliasMetadata.fromState(clusterService.state());
+        final TrainedModelAssignmentMetadata assignmentMetadata = TrainedModelAssignmentMetadata.fromState(clusterService.state());
+        final Set<String> matchedDeploymentIds = matchedDeploymentIds(request.getResourceId(), assignmentMetadata);
+
         GetTrainedModelsStatsAction.Response.Builder responseBuilder = new GetTrainedModelsStatsAction.Response.Builder();
 
-        ActionListener<Map<String, TrainedModelSizeStats>> modelSizeStatsListener = ActionListener.wrap(modelSizeStatsByModelId -> {
+        StepListener<Map<String, TrainedModelSizeStats>> modelSizeStatsListener = new StepListener<>();
+        modelSizeStatsListener.whenComplete(modelSizeStatsByModelId -> {
             responseBuilder.setModelSizeStatsByModelId(modelSizeStatsByModelId);
-            listener.onResponse(responseBuilder.build());
+            listener.onResponse(
+                responseBuilder.build(modelToDeployments(responseBuilder.getExpandedModelIdsWithAliases().keySet(), assignmentMetadata))
+            );
         }, listener::onFailure);
 
-        ActionListener<GetDeploymentStatsAction.Response> deploymentStatsListener = ActionListener.wrap(deploymentStats -> {
-            responseBuilder.setDeploymentStatsByModelId(
+        StepListener<GetDeploymentStatsAction.Response> deploymentStatsListener = new StepListener<>();
+        deploymentStatsListener.whenComplete(deploymentStats -> {
+            // deployment stats for each matching deployment
+            // not necessarily for all models
+            responseBuilder.setDeploymentStatsByDeploymentId(
                 deploymentStats.getStats()
                     .results()
                     .stream()
                     .collect(Collectors.toMap(AssignmentStats::getDeploymentId, Function.identity()))
             );
-            modelSizeStats(responseBuilder.getExpandedIdsWithAliases(), request.isAllowNoResources(), parentTaskId, modelSizeStatsListener);
+            modelSizeStats(
+                responseBuilder.getExpandedModelIdsWithAliases(),
+                request.isAllowNoResources(),
+                parentTaskId,
+                modelSizeStatsListener
+            );
         }, listener::onFailure);
 
-        ActionListener<List<InferenceStats>> inferenceStatsListener = ActionListener.wrap(inferenceStats -> {
+        StepListener<List<InferenceStats>> inferenceStatsListener = new StepListener<>();
+        // inference stats are per model and are only
+        // persisted for boosted tree models
+        inferenceStatsListener.whenComplete(inferenceStats -> {
             responseBuilder.setInferenceStatsByModelId(
                 inferenceStats.stream().collect(Collectors.toMap(InferenceStats::getModelId, Function.identity()))
             );
-            GetDeploymentStatsAction.Request getDeploymentStatsRequest = new GetDeploymentStatsAction.Request(request.getResourceId());
-            getDeploymentStatsRequest.setParentTask(parentTaskId);
-            executeAsyncWithOrigin(
-                client,
-                ML_ORIGIN,
-                GetDeploymentStatsAction.INSTANCE,
-                getDeploymentStatsRequest,
-                deploymentStatsListener
-            );
+            getDeploymentStats(client, request.getResourceId(), parentTaskId, assignmentMetadata, deploymentStatsListener);
         }, listener::onFailure);
 
-        ActionListener<NodesStatsResponse> nodesStatsListener = ActionListener.wrap(nodesStatsResponse -> {
-            Set<String> allPossiblePipelineReferences = responseBuilder.getExpandedIdsWithAliases()
+        StepListener<NodesStatsResponse> nodesStatsListener = new StepListener<>();
+        nodesStatsListener.whenComplete(nodesStatsResponse -> {
+            // find all pipelines whether using the model id,
+            // alias or deployment id.
+            Set<String> allPossiblePipelineReferences = responseBuilder.getExpandedModelIdsWithAliases()
                 .entrySet()
                 .stream()
                 .flatMap(entry -> Stream.concat(entry.getValue().stream(), Stream.of(entry.getKey())))
                 .collect(Collectors.toSet());
-            Map<String, Set<String>> pipelineIdsByModelIdsOrAliases = pipelineIdsByModelIdsOrAliases(
-                clusterService.state(),
-                allPossiblePipelineReferences
-            );
+            allPossiblePipelineReferences.addAll(matchedDeploymentIds);
+
+            Map<String, Set<String>> pipelineIdsByResource = pipelineIdsByResource(clusterService.state(), allPossiblePipelineReferences);
             Map<String, IngestStats> modelIdIngestStats = inferenceIngestStatsByModelId(
                 nodesStatsResponse,
-                currentMetadata,
-                pipelineIdsByModelIdsOrAliases
+                modelAliasMetadata,
+                pipelineIdsByResource
             );
             responseBuilder.setIngestStatsByModelId(modelIdIngestStats);
             trainedModelProvider.getInferenceStats(
-                responseBuilder.getExpandedIdsWithAliases().keySet().toArray(new String[0]),
+                responseBuilder.getExpandedModelIdsWithAliases().keySet().toArray(new String[0]),
                 parentTaskId,
                 inferenceStatsListener
             );
         }, listener::onFailure);
 
-        ActionListener<Tuple<Long, Map<String, Set<String>>>> idsListener = ActionListener.wrap(tuple -> {
-            responseBuilder.setExpandedIdsWithAliases(tuple.v2()).setTotalModelCount(tuple.v1());
-            String[] ingestNodes = ingestNodes(clusterService.state());
-            NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(ingestNodes).clear()
-                .addMetric(NodesStatsRequest.Metric.INGEST.metricName());
-            nodesStatsRequest.setParentTask(parentTaskId);
-            executeAsyncWithOrigin(client, ML_ORIGIN, NodesStatsAction.INSTANCE, nodesStatsRequest, nodesStatsListener);
+        StepListener<Tuple<Long, Map<String, Set<String>>>> idsListener = new StepListener<>();
+        idsListener.whenComplete(tuple -> {
+            responseBuilder.setExpandedModelIdsWithAliases(tuple.v2()).setTotalModelCount(tuple.v1());
+            executeAsyncWithOrigin(
+                client,
+                ML_ORIGIN,
+                NodesStatsAction.INSTANCE,
+                nodeStatsRequest(clusterService.state(), parentTaskId),
+                nodesStatsListener
+            );
         }, listener::onFailure);
+
+        // When the requested resource is a deployment, find the model
+        // used in that deployment so its model stats are gathered too
+        String idExpression = addModelsUsedInMatchingDeployments(request.getResourceId(), assignmentMetadata);
+        logger.debug("Expanded model/deployment ids request [{}]", idExpression);
+
+        // The request id may contain deployment ids. It is not an error if
+        // these do not match a model id, but they must still be passed on in
+        // case a deployment id is also a model id. Hence the
+        // `matchedDeploymentIds` parameter.
         trainedModelProvider.expandIds(
-            request.getResourceId(),
+            idExpression,
             request.isAllowNoResources(),
             request.getPageParams(),
             Collections.emptySet(),
-            currentMetadata,
+            modelAliasMetadata,
             parentTaskId,
+            matchedDeploymentIds,
             idsListener
         );
     }
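
Reviewer note: the listeners above are declared in reverse execution order; each step's whenComplete handler fires the next request, so the chain runs expandIds -> ingest node stats -> inference stats -> deployment stats -> model size stats -> final response. A minimal sketch of the StepListener chaining pattern, using toy String payloads rather than the ML response types (the method and payloads here are illustrative, not part of this change):

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.StepListener;

    static void chain(ActionListener<String> finalListener) {
        // declared in reverse order, exactly as in the action above
        StepListener<String> stepTwo = new StepListener<>();
        stepTwo.whenComplete(r -> finalListener.onResponse(r + " -> two"), finalListener::onFailure);

        StepListener<String> stepOne = new StepListener<>();
        stepOne.whenComplete(r -> stepTwo.onResponse(r + " -> one"), finalListener::onFailure);

        stepOne.onResponse("start"); // kicks off the chain; completes with "start -> one -> two"
    }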
 
+    static String addModelsUsedInMatchingDeployments(String idExpression, TrainedModelAssignmentMetadata assignmentMetadata) {
+        if (Strings.isAllOrWildcard(idExpression)) {
+            return idExpression;
+        } else {
+            var tokens = new HashSet<>(Arrays.asList(ExpandedIdsMatcher.tokenizeExpression(idExpression)));
+            var modelsUsedByMatchingDeployments = modelsUsedByMatchingDeploymentId(idExpression, assignmentMetadata);
+            tokens.addAll(modelsUsedByMatchingDeployments);
+            return String.join(",", tokens);
+        }
+    }
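
A worked example of the expansion, with hypothetical ids: if the assignment metadata records a deployment "my-deployment" running model "my-model", a request for "my-deployment" gains the model token so model-level stats can be fetched as well, while wildcard requests pass through untouched:

    // hypothetical metadata: deployment "my-deployment" runs model "my-model"
    String expanded = addModelsUsedInMatchingDeployments("my-deployment", metadata);
    // token order is unspecified because the tokens are collected into a HashSet
    assert Set.of(expanded.split(",")).equals(Set.of("my-deployment", "my-model"));
    assert addModelsUsedInMatchingDeployments("_all", metadata).equals("_all");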
+
+    static Map<String, Set<String>> modelToDeployments(Set<String> modelIds, TrainedModelAssignmentMetadata assignments) {
+        var modelToDeploymentMap = new HashMap<String, Set<String>>();
+        for (var assignment : assignments.allAssignments().values()) {
+            if (modelIds.contains(assignment.getModelId())) {
+                modelToDeploymentMap.computeIfAbsent(assignment.getModelId(), k -> new HashSet<>()).add(assignment.getDeploymentId());
+            }
+        }
+        return modelToDeploymentMap;
+    }
+
+    static Set<String> matchedDeploymentIds(String resourceId, TrainedModelAssignmentMetadata assignments) {
+        var deploymentIds = new HashSet<String>();
+        var matcher = new ExpandedIdsMatcher.SimpleIdsMatcher(resourceId);
+        for (var assignment : assignments.allAssignments().values()) {
+            if (matcher.idMatches(assignment.getDeploymentId())) {
+                deploymentIds.add(assignment.getDeploymentId());
+            }
+        }
+        return deploymentIds;
+    }
+
+    static Set<String> modelsUsedByMatchingDeploymentId(String resourceId, TrainedModelAssignmentMetadata assignments) {
+        var modelIds = new HashSet<String>();
+        var matcher = new ExpandedIdsMatcher.SimpleIdsMatcher(resourceId);
+        for (var assignment : assignments.allAssignments().values()) {
+            if (matcher.idMatches(assignment.getDeploymentId())) {
+                modelIds.add(assignment.getModelId());
+            }
+        }
+        return modelIds;
+    }
+
+    static void getDeploymentStats(
+        Client client,
+        String resourceId,
+        TaskId parentTaskId,
+        TrainedModelAssignmentMetadata assignments,
+        ActionListener<GetDeploymentStatsAction.Response> deploymentStatsListener
+    ) {
+        // include deployments matched either by deployment id or by the
+        // id of the model they are running
+        var matcher = new ExpandedIdsMatcher.SimpleIdsMatcher(resourceId);
+        var matchedDeployments = new HashSet<String>();
+        for (var assignment : assignments.allAssignments().values()) {
+            if (matcher.idMatches(assignment.getDeploymentId()) || matcher.idMatches(assignment.getModelId())) {
+                matchedDeployments.add(assignment.getDeploymentId());
+            }
+        }
+        String deployments = String.join(",", matchedDeployments);
+
+        logger.debug("Fetching stats for deployments [{}]", deployments);
+
+        GetDeploymentStatsAction.Request getDeploymentStatsRequest = new GetDeploymentStatsAction.Request(deployments);
+        getDeploymentStatsRequest.setParentTask(parentTaskId);
+        executeAsyncWithOrigin(client, ML_ORIGIN, GetDeploymentStatsAction.INSTANCE, getDeploymentStatsRequest, deploymentStatsListener);
+    }
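
The effect is that a stats request fans out by either kind of id (hypothetical ids: deployments d1 and d2 both run my-model):

    // getDeploymentStats(client, "my-model", parentTaskId, assignments, listener)
    //     => GetDeploymentStatsAction.Request("d1,d2")   (order unspecified)
    // getDeploymentStats(client, "d1", parentTaskId, assignments, listener)
    //     => GetDeploymentStatsAction.Request("d1")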
+
     private void modelSizeStats(
         Map<String, Set<String>> expandedIdsWithAliases,
         boolean allowNoResources,
@@ -260,8 +364,12 @@ public class TransportGetTrainedModelsStatsAction extends HandledTransportAction
         return ingestStatsMap;
     }
 
-    static String[] ingestNodes(final ClusterState clusterState) {
-        return clusterState.nodes().getIngestNodes().keySet().toArray(String[]::new);
+    static NodesStatsRequest nodeStatsRequest(ClusterState state, TaskId parentTaskId) {
+        String[] ingestNodes = state.nodes().getIngestNodes().keySet().toArray(String[]::new);
+        NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(ingestNodes).clear()
+            .addMetric(NodesStatsRequest.Metric.INGEST.metricName());
+        nodesStatsRequest.setParentTask(parentTaskId);
+        return nodesStatsRequest;
     }
 
     static IngestStats ingestStatsForPipelineIds(NodeStats nodeStats, Set<String> pipelineIds) {
@@ -361,4 +469,6 @@ public class TransportGetTrainedModelsStatsAction extends HandledTransportAction
         }
     }
 
+    private record ModelAndDeployment(String modelId, String deploymentId) {}
+
 }

+ 11 - 0
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java

@@ -114,6 +114,17 @@ public class TrainedModelAssignmentMetadata implements Metadata.Custom {
         return deploymentRoutingEntries.containsKey(deploymentId);
     }
 
+    public boolean modelIsDeployed(String modelId) {
+        return deploymentRoutingEntries.values().stream().anyMatch(assignment -> modelId.equals(assignment.getModelId()));
+    }
+
+    public List<TrainedModelAssignment> getDeploymentsUsingModel(String modelId) {
+        return deploymentRoutingEntries.values()
+            .stream()
+            .filter(assignment -> modelId.equals(assignment.getModelId()))
+            .collect(Collectors.toList());
+    }
+
     public Map<String, TrainedModelAssignment> allAssignments() {
         return Collections.unmodifiableMap(deploymentRoutingEntries);
     }
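
Usage sketch for the two new accessors, with hypothetical ids (a single deployment "d1" running model "m1"):

    assert metadata.isAssigned("d1");                        // keyed by deployment id
    assert metadata.modelIsDeployed("m1");                   // scans assignments by model id
    assert metadata.getDeploymentsUsingModel("m1").size() == 1;
    assert metadata.getDeploymentsUsingModel("m2").isEmpty();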

+ 19 - 0
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java

@@ -863,6 +863,23 @@ public class TrainedModelProvider {
         }));
     }
 
+    /**
+     * Returns a Tuple of
+     *  - hit count: the number of matching model ids
+     *  - a map of model id to aliases: every matched model id and
+     *    the set of aliases that reference it
+     *
+     * @param idExpression The expression to expand
+     * @param allowNoResources When wildcard expressions are used, allow
+     *                         them to match nothing (do not error)
+     * @param pageParams Paging parameters
+     * @param tags Tags the model must contain
+     * @param modelAliasMetadata Model alias metadata used to resolve aliases
+     * @param parentTaskId Optional parent task id
+     * @param previouslyMatchedIds Ids that have already been matched elsewhere (e.g. deployment ids).
+     *                             It is not an error if these ids are not matched by the query
+     * @param idsListener Listener notified with the count and the expanded ids
+     */
     public void expandIds(
         String idExpression,
         boolean allowNoResources,
@@ -870,6 +887,7 @@ public class TrainedModelProvider {
         Set<String> tags,
         ModelAliasMetadata modelAliasMetadata,
         @Nullable TaskId parentTaskId,
+        Set<String> previouslyMatchedIds,
         ActionListener<Tuple<Long, Map<String, Set<String>>>> idsListener
     ) {
         String[] tokens = Strings.tokenizeToStringArray(idExpression, ",");
@@ -973,6 +991,7 @@ public class TrainedModelProvider {
                 // Reverse lookup to see what model aliases were matched by their found trained model IDs
                 ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoResources);
                 requiredMatches.filterMatchedIds(matchedTokens);
+                requiredMatches.filterMatchedIds(previouslyMatchedIds);
                 if (requiredMatches.hasUnmatchedIds()) {
                     idsListener.onFailure(ExceptionsHelper.missingTrainedModel(requiredMatches.unmatchedIdsString()));
                 } else {
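
Sketched effect of the new previouslyMatchedIds parameter, with a hypothetical id: a deployment id that matched no trained model no longer trips the missing-model check:

    // tokens = { "my-deployment" } and no model by that name exists, so
    // filterMatchedIds(matchedTokens) leaves "my-deployment" unmatched;
    // previouslyMatchedIds = { "my-deployment" } (it matched a deployment)
    requiredMatches.filterMatchedIds(previouslyMatchedIds);   // clears the leftover id
    assert requiredMatches.hasUnmatchedIds() == false;        // no missingTrainedModel error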

+ 3 - 3
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractor.java

@@ -71,10 +71,10 @@ public final class InferenceProcessorInfoExtractor {
 
     /**
      * @param state Current cluster state
-     * @return a map from Model IDs or Aliases to each pipeline referencing them.
+     * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them.
      */
     @SuppressWarnings("unchecked")
-    public static Map<String, Set<String>> pipelineIdsByModelIdsOrAliases(ClusterState state, Set<String> modelIds) {
+    public static Map<String, Set<String>> pipelineIdsByResource(ClusterState state, Set<String> ids) {
         Map<String, Set<String>> pipelineIdsByModelIds = new HashMap<>();
         Metadata metadata = state.metadata();
         if (metadata == null) {
@@ -90,7 +90,7 @@ public final class InferenceProcessorInfoExtractor {
             for (Map<String, Object> processorConfigWithKey : processorConfigs) {
                 for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) {
                     addModelsAndPipelines(entry.getKey(), pipelineId, (Map<String, Object>) entry.getValue(), pam -> {
-                        if (modelIds.contains(pam.modelIdOrAlias)) {
+                        if (ids.contains(pam.modelIdOrAlias)) {
                             pipelineIdsByModelIds.computeIfAbsent(pam.modelIdOrAlias, m -> new LinkedHashSet<>()).add(pipelineId);
                         }
                     }, 0);
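
A sketch of the renamed lookup with a hypothetical pipeline: if ingest pipeline "p1" contains an inference processor whose model_id field is "my-deployment", then:

    // pipelineIdsByResource(state, Set.of("my-deployment", "my-model"))
    //     => { "my-deployment" -> { "p1" } }
    // ids that no pipeline references are simply absent from the result map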

+ 148 - 0
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java

@@ -16,18 +16,36 @@ import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.plugins.ActionPlugin;
+import org.elasticsearch.plugins.ExtensiblePlugin;
+import org.elasticsearch.rest.RestHandler;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
+import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction;
+import org.elasticsearch.xpack.core.ml.action.GetJobsAction;
+import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction;
+import org.elasticsearch.xpack.core.ml.action.MlInfoAction;
 import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction;
+import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction;
+import org.elasticsearch.xpack.ml.rest.RestMlInfoAction;
+import org.elasticsearch.xpack.ml.rest.dataframe.RestGetDataFrameAnalyticsAction;
+import org.elasticsearch.xpack.ml.rest.inference.RestGetTrainedModelsAction;
+import org.elasticsearch.xpack.ml.rest.inference.RestStartTrainedModelDeploymentAction;
+import org.elasticsearch.xpack.ml.rest.job.RestGetJobsAction;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.startsWith;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
@@ -205,6 +223,136 @@ public class MachineLearningTests extends ESTestCase {
         }
     }
 
+    public void testAnomalyDetectionOnly() throws IOException {
+        Settings settings = Settings.builder().put("path.home", createTempDir()).build();
+        try (MachineLearning machineLearning = createTrialLicensedMachineLearning(settings)) {
+            MlTestExtensionLoader loader = new MlTestExtensionLoader(new MlTestExtension(false, false, true, false, false));
+            machineLearning.loadExtensions(loader);
+            List<RestHandler> restHandlers = machineLearning.getRestHandlers(settings, null, null, null, null, null, null);
+            assertThat(restHandlers, hasItem(instanceOf(RestMlInfoAction.class)));
+            assertThat(restHandlers, hasItem(instanceOf(RestGetJobsAction.class)));
+            assertThat(restHandlers, not(hasItem(instanceOf(RestGetTrainedModelsAction.class))));
+            assertThat(restHandlers, not(hasItem(instanceOf(RestGetDataFrameAnalyticsAction.class))));
+            assertThat(restHandlers, not(hasItem(instanceOf(RestStartTrainedModelDeploymentAction.class))));
+            List<?> actions = machineLearning.getActions().stream().map(ActionPlugin.ActionHandler::getAction).toList();
+            assertThat(actions, hasItem(instanceOf(XPackUsageFeatureAction.class)));
+            assertThat(actions, hasItem(instanceOf(MlInfoAction.class)));
+            assertThat(actions, hasItem(instanceOf(GetJobsAction.class)));
+            assertThat(actions, not(hasItem(instanceOf(GetTrainedModelsAction.class))));
+            assertThat(actions, not(hasItem(instanceOf(GetDataFrameAnalyticsAction.class))));
+            assertThat(actions, not(hasItem(instanceOf(StartTrainedModelDeploymentAction.class))));
+        }
+    }
+
+    public void testDataFrameAnalyticsOnly() throws IOException {
+        Settings settings = Settings.builder().put("path.home", createTempDir()).build();
+        try (MachineLearning machineLearning = createTrialLicensedMachineLearning(settings)) {
+            MlTestExtensionLoader loader = new MlTestExtensionLoader(new MlTestExtension(false, false, false, true, false));
+            machineLearning.loadExtensions(loader);
+            List<RestHandler> restHandlers = machineLearning.getRestHandlers(settings, null, null, null, null, null, null);
+            assertThat(restHandlers, hasItem(instanceOf(RestMlInfoAction.class)));
+            assertThat(restHandlers, not(hasItem(instanceOf(RestGetJobsAction.class))));
+            assertThat(restHandlers, hasItem(instanceOf(RestGetTrainedModelsAction.class)));
+            assertThat(restHandlers, hasItem(instanceOf(RestGetDataFrameAnalyticsAction.class)));
+            assertThat(restHandlers, not(hasItem(instanceOf(RestStartTrainedModelDeploymentAction.class))));
+            List<?> actions = machineLearning.getActions().stream().map(ActionPlugin.ActionHandler::getAction).toList();
+            assertThat(actions, hasItem(instanceOf(XPackUsageFeatureAction.class)));
+            assertThat(actions, hasItem(instanceOf(MlInfoAction.class)));
+            assertThat(actions, not(hasItem(instanceOf(GetJobsAction.class))));
+            assertThat(actions, hasItem(instanceOf(GetTrainedModelsAction.class)));
+            assertThat(actions, hasItem(instanceOf(GetDataFrameAnalyticsAction.class)));
+            assertThat(actions, not(hasItem(instanceOf(StartTrainedModelDeploymentAction.class))));
+        }
+    }
+
+    public void testNlpOnly() throws IOException {
+        Settings settings = Settings.builder().put("path.home", createTempDir()).build();
+        try (MachineLearning machineLearning = createTrialLicensedMachineLearning(settings)) {
+            MlTestExtensionLoader loader = new MlTestExtensionLoader(new MlTestExtension(false, false, false, false, true));
+            machineLearning.loadExtensions(loader);
+            List<RestHandler> restHandlers = machineLearning.getRestHandlers(settings, null, null, null, null, null, null);
+            assertThat(restHandlers, hasItem(instanceOf(RestMlInfoAction.class)));
+            assertThat(restHandlers, not(hasItem(instanceOf(RestGetJobsAction.class))));
+            assertThat(restHandlers, hasItem(instanceOf(RestGetTrainedModelsAction.class)));
+            assertThat(restHandlers, not(hasItem(instanceOf(RestGetDataFrameAnalyticsAction.class))));
+            assertThat(restHandlers, hasItem(instanceOf(RestStartTrainedModelDeploymentAction.class)));
+            List<?> actions = machineLearning.getActions().stream().map(ActionPlugin.ActionHandler::getAction).toList();
+            assertThat(actions, hasItem(instanceOf(XPackUsageFeatureAction.class)));
+            assertThat(actions, hasItem(instanceOf(MlInfoAction.class)));
+            assertThat(actions, not(hasItem(instanceOf(GetJobsAction.class))));
+            assertThat(actions, hasItem(instanceOf(GetTrainedModelsAction.class)));
+            assertThat(actions, not(hasItem(instanceOf(GetDataFrameAnalyticsAction.class))));
+            assertThat(actions, hasItem(instanceOf(StartTrainedModelDeploymentAction.class)));
+        }
+    }
+
+    public static class MlTestExtension implements MachineLearningExtension {
+
+        private final boolean useIlm;
+        private final boolean includeNodeInfo;
+        private final boolean isAnomalyDetectionEnabled;
+        private final boolean isDataFrameAnalyticsEnabled;
+        private final boolean isNlpEnabled;
+
+        MlTestExtension(
+            boolean useIlm,
+            boolean includeNodeInfo,
+            boolean isAnomalyDetectionEnabled,
+            boolean isDataFrameAnalyticsEnabled,
+            boolean isNlpEnabled
+        ) {
+            this.useIlm = useIlm;
+            this.includeNodeInfo = includeNodeInfo;
+            this.isAnomalyDetectionEnabled = isAnomalyDetectionEnabled;
+            this.isDataFrameAnalyticsEnabled = isDataFrameAnalyticsEnabled;
+            this.isNlpEnabled = isNlpEnabled;
+        }
+
+        @Override
+        public boolean useIlm() {
+            return useIlm;
+        }
+
+        @Override
+        public boolean includeNodeInfo() {
+            return includeNodeInfo;
+        }
+
+        @Override
+        public boolean isAnomalyDetectionEnabled() {
+            return isAnomalyDetectionEnabled;
+        }
+
+        @Override
+        public boolean isDataFrameAnalyticsEnabled() {
+            return isDataFrameAnalyticsEnabled;
+        }
+
+        @Override
+        public boolean isNlpEnabled() {
+            return isNlpEnabled;
+        }
+    }
+
+    public static class MlTestExtensionLoader implements ExtensiblePlugin.ExtensionLoader {
+
+        private final MachineLearningExtension extension;
+
+        MlTestExtensionLoader(MachineLearningExtension extension) {
+            this.extension = extension;
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public <T> List<T> loadExtensions(Class<T> extensionPointType) {
+            if (extensionPointType.isAssignableFrom(MachineLearningExtension.class)) {
+                return List.of((T) extension);
+            } else {
+                return List.of();
+            }
+        }
+    }
+
     public static class TrialLicensedMachineLearning extends MachineLearning {
 
         // A license state constructed like this is considered a trial license

+ 27 - 1
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java

@@ -45,6 +45,7 @@ import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.same;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
@@ -139,6 +140,21 @@ public class MlDailyMaintenanceServiceTests extends ESTestCase {
         );
     }
 
+    public void testNoAnomalyDetectionTasksWhenDisabled() throws InterruptedException {
+        when(clusterService.state()).thenReturn(createClusterState(false));
+
+        CountDownLatch latch = new CountDownLatch(2);
+        try (MlDailyMaintenanceService service = createService(latch, client, false, randomBoolean(), randomBoolean())) {
+            service.start();
+            latch.await(5, TimeUnit.SECONDS);
+        }
+
+        verify(client, never()).threadPool();
+        verify(client, never()).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any());
+        verify(client, never()).execute(same(GetJobsAction.INSTANCE), any(), any());
+        verify(mlAssignmentNotifier, Mockito.atLeast(1)).auditUnassignedMlTasks(any(), any());
+    }
+
     private void assertThatBothTasksAreTriggered(Answer<?> deleteExpiredDataAnswer, Answer<?> getJobsAnswer) throws InterruptedException {
         when(clusterService.state()).thenReturn(createClusterState(false));
         doAnswer(deleteExpiredDataAnswer).when(client).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any());
@@ -239,6 +255,16 @@ public class MlDailyMaintenanceServiceTests extends ESTestCase {
     }
 
     private MlDailyMaintenanceService createService(CountDownLatch latch, Client client) {
+        return createService(latch, client, true, true, true);
+    }
+
+    private MlDailyMaintenanceService createService(
+        CountDownLatch latch,
+        Client client,
+        boolean isAnomalyDetectionEnabled,
+        boolean isDataFrameAnalyticsEnabled,
+        boolean isNlpEnabled
+    ) {
         return new MlDailyMaintenanceService(Settings.EMPTY, threadPool, client, clusterService, mlAssignmentNotifier, () -> {
             // We need to be careful that an unexpected iteration doesn't get squeezed in by the maintenance threadpool in
             // between the latch getting counted down to zero and the main test thread stopping the maintenance service.
@@ -250,7 +276,7 @@ public class MlDailyMaintenanceServiceTests extends ESTestCase {
             } else {
                 return TimeValue.timeValueHours(1);
             }
-        });
+        }, isAnomalyDetectionEnabled, isDataFrameAnalyticsEnabled, isNlpEnabled);
     }
 
     private static ClusterState createClusterState(boolean isUpgradeMode) {

+ 8 - 2
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java

@@ -81,7 +81,10 @@ public class MlInitializationServiceTests extends ESTestCase {
             threadPool,
             clusterService,
             client,
-            mlAssignmentNotifier
+            mlAssignmentNotifier,
+            true,
+            true,
+            true
         );
         initializationService.onMaster();
         assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(true));
@@ -93,7 +96,10 @@ public class MlInitializationServiceTests extends ESTestCase {
             threadPool,
             clusterService,
             client,
-            mlAssignmentNotifier
+            mlAssignmentNotifier,
+            true,
+            true,
+            true
         );
         initializationService.offMaster();
         assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(false));

+ 36 - 1
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java

@@ -23,6 +23,7 @@ import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
 
 public class TrainedModelAssignmentMetadataTests extends AbstractChunkedSerializingTestCase<TrainedModelAssignmentMetadata> {
@@ -56,7 +57,7 @@ public class TrainedModelAssignmentMetadataTests extends AbstractChunkedSerializ
         return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
     }
 
-    public void testIsAllocated() {
+    public void testIsAssigned() {
         String allocatedModelId = "test_model_id";
         String allocatedDeploymentId = "test_deployment";
         TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.Builder.empty()
@@ -66,9 +67,43 @@ public class TrainedModelAssignmentMetadataTests extends AbstractChunkedSerializ
             )
             .build();
         assertThat(metadata.isAssigned(allocatedDeploymentId), is(true));
+        assertThat(metadata.isAssigned(allocatedModelId), is(false));
         assertThat(metadata.isAssigned("unknown_model_id"), is(false));
     }
 
+    public void testModelIsDeployed() {
+        String allocatedModelId = "test_model_id";
+        String allocatedDeploymentId = "test_deployment";
+        TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.Builder.empty()
+            .addNewAssignment(
+                allocatedDeploymentId,
+                TrainedModelAssignment.Builder.empty(randomParams(allocatedDeploymentId, allocatedModelId))
+            )
+            .build();
+        assertThat(metadata.modelIsDeployed(allocatedDeploymentId), is(false));
+        assertThat(metadata.modelIsDeployed(allocatedModelId), is(true));
+        assertThat(metadata.modelIsDeployed("unknown_model_id"), is(false));
+    }
+
+    public void testGetDeploymentsUsingModel() {
+        String modelId1 = "test_model_id_1";
+        String deployment1 = "test_deployment_1";
+        String deployment2 = "test_deployment_2";
+        String deployment3 = "test_deployment_3";
+        TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.Builder.empty()
+            .addNewAssignment(deployment1, TrainedModelAssignment.Builder.empty(randomParams(deployment1, modelId1)))
+            .addNewAssignment(deployment2, TrainedModelAssignment.Builder.empty(randomParams(deployment2, modelId1)))
+            .addNewAssignment(deployment3, TrainedModelAssignment.Builder.empty(randomParams(deployment3, "different_model")))
+            .build();
+        var assignments = metadata.getDeploymentsUsingModel(modelId1);
+        assertThat(assignments, hasSize(2));
+        assertEquals(modelId1, assignments.get(0).getModelId());
+        assertEquals(modelId1, assignments.get(1).getModelId());
+
+        assignments = metadata.getDeploymentsUsingModel("not-deployed");
+        assertThat(assignments, hasSize(0));
+    }
+
     private static StartTrainedModelDeploymentAction.TaskParams randomParams(String deploymentId, String modelId) {
         return new StartTrainedModelDeploymentAction.TaskParams(
             modelId,

+ 1 - 4
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/InferenceProcessorInfoExtractorTests.java

@@ -48,10 +48,7 @@ public class InferenceProcessorInfoExtractorTests extends ESTestCase {
 
         ClusterState clusterState = buildClusterStateWithModelReferences(2, modelId1, modelId2, modelId3);
 
-        Map<String, Set<String>> pipelineIdsByModelIds = InferenceProcessorInfoExtractor.pipelineIdsByModelIdsOrAliases(
-            clusterState,
-            modelIds
-        );
+        Map<String, Set<String>> pipelineIdsByModelIds = InferenceProcessorInfoExtractor.pipelineIdsByResource(clusterState, modelIds);
 
         assertThat(pipelineIdsByModelIds.keySet(), equalTo(modelIds));
         assertThat(

+ 4 - 4
x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java

@@ -46,7 +46,7 @@ public class RemoteClusterSecurityLicensingAndFeatureUsageRestIT extends Abstrac
     private static final AtomicReference<Map<String, Object>> API_KEY_MAP_REF = new AtomicReference<>();
 
     private static final String REMOTE_INDEX_NAME = "remote_index";
-    public static final String CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE_NAME = "configurable-cross-cluster-access";
+    public static final String ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE_NAME = "advanced-remote-cluster-security";
 
     static {
         fulfillingCluster = ElasticsearchCluster.local()
@@ -230,7 +230,7 @@ public class RemoteClusterSecurityLicensingAndFeatureUsageRestIT extends Abstrac
         assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(403));
         assertThat(
             exception.getMessage(),
-            containsString("current license is non-compliant for [" + CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE_NAME + "]")
+            containsString("current license is non-compliant for [" + ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE_NAME + "]")
         );
     }
 
@@ -269,12 +269,12 @@ public class RemoteClusterSecurityLicensingAndFeatureUsageRestIT extends Abstrac
 
     private static void assertFeatureTracked(RestClient client) throws IOException {
         Set<String> features = fetchFeatureUsageFromNode(client);
-        assertThat(CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE_NAME, is(in(features)));
+        assertThat(ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE_NAME, is(in(features)));
     }
 
     private static void assertFeatureNotTracked(RestClient client) throws IOException {
         Set<String> features = fetchFeatureUsageFromNode(client);
-        assertThat(CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE_NAME, not(is(in(features))));
+        assertThat(ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE_NAME, not(is(in(features))));
     }
 
     private static Set<String> fetchFeatureUsageFromNode(RestClient client) throws IOException {

+ 8 - 8
x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java

@@ -720,12 +720,12 @@ public class CrossClusterAccessHeadersForCcsRestIT extends SecurityOnTrialLicens
                 .boundRemoteIngressAddress()
                 .publishAddress();
             final int numberOfRemoteClusters = randomIntBetween(0, 5);
-            final int numberOfConfigurables = randomIntBetween(0, Math.min(2, numberOfRemoteClusters));
-            final int numberOfBasics = numberOfRemoteClusters - numberOfConfigurables;
+            final int numberOfApiKeySecured = randomIntBetween(0, Math.min(2, numberOfRemoteClusters));
+            final int numberOfCertSecured = numberOfRemoteClusters - numberOfApiKeySecured;
             final List<Boolean> useProxyModes = randomList(numberOfRemoteClusters, numberOfRemoteClusters, ESTestCase::randomBoolean);
 
-            // Remote clusters with new configurable model
+            // Remote clusters secured with the API key based model
-            switch (numberOfConfigurables) {
+            switch (numberOfApiKeySecured) {
                 case 0 -> {
                 }
                 case 1 -> setupClusterSettings(CLUSTER_A, remoteClusterServerPortAddress, useProxyModes.get(0));
@@ -733,12 +733,12 @@ public class CrossClusterAccessHeadersForCcsRestIT extends SecurityOnTrialLicens
                     setupClusterSettings(CLUSTER_A, remoteClusterServerPortAddress, useProxyModes.get(0));
                     setupClusterSettings(CLUSTER_B, remoteClusterServerPortAddress, useProxyModes.get(1));
                 }
-                default -> throw new IllegalArgumentException("invalid number of configurable remote clusters");
+                default -> throw new IllegalArgumentException("invalid number of api_key secured remote clusters");
             }
 
-            // Remote clusters with basic model
+            // Remote clusters secured with the certificate based model
-            for (int i = 0; i < numberOfBasics; i++) {
-                setupClusterSettings("basic_cluster_" + i, transportPortAddress, useProxyModes.get(i + numberOfConfigurables));
+            for (int i = 0; i < numberOfCertSecured; i++) {
+                setupClusterSettings("basic_cluster_" + i, transportPortAddress, useProxyModes.get(i + numberOfApiKeySecured));
             }
 
             final Request xPackUsageRequest = new Request("GET", "/_xpack/usage");
@@ -750,8 +750,8 @@ public class CrossClusterAccessHeadersForCcsRestIT extends SecurityOnTrialLicens
             final int numberOfProxyModes = (int) useProxyModes.stream().filter(e -> e).count();
             assertThat(path.evaluate("remote_clusters.mode.proxy"), equalTo(numberOfProxyModes));
             assertThat(path.evaluate("remote_clusters.mode.sniff"), equalTo(numberOfRemoteClusters - numberOfProxyModes));
-            assertThat(path.evaluate("remote_clusters.security.basic"), equalTo(numberOfBasics));
-            assertThat(path.evaluate("remote_clusters.security.configurable"), equalTo(numberOfConfigurables));
+            assertThat(path.evaluate("remote_clusters.security.cert"), equalTo(numberOfCertSecured));
+            assertThat(path.evaluate("remote_clusters.security.api_key"), equalTo(numberOfApiKeySecured));
 
             assertThat(path.evaluate("security.remote_cluster_server.available"), is(true));
             assertThat(path.evaluate("security.remote_cluster_server.enabled"), is(false));

+ 2 - 2
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java

@@ -495,9 +495,9 @@ public class Security extends Plugin
     /**
-     * Configurable cross cluster access is Enterprise feature.
+     * Advanced remote cluster security is an Enterprise feature.
      */
-    public static final LicensedFeature.Momentary CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE = LicensedFeature.momentary(
+    public static final LicensedFeature.Momentary ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE = LicensedFeature.momentary(
         null,
-        "configurable-cross-cluster-access",
+        "advanced-remote-cluster-security",
         License.OperationMode.ENTERPRISE
     );
 

+ 2 - 2
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java

@@ -50,7 +50,7 @@ import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_CLIENT_S
 import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_SERVER_SSL_ENABLED;
 import static org.elasticsearch.xpack.core.XPackSettings.TOKEN_SERVICE_ENABLED_SETTING;
 import static org.elasticsearch.xpack.core.XPackSettings.TRANSPORT_SSL_ENABLED;
-import static org.elasticsearch.xpack.security.Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE;
+import static org.elasticsearch.xpack.security.Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE;
 
 public class SecurityUsageTransportAction extends XPackUsageFeatureTransportAction {
 
@@ -191,7 +191,7 @@ public class SecurityUsageTransportAction extends XPackUsageFeatureTransportActi
         if (TcpTransport.isUntrustedRemoteClusterEnabled() && XPackSettings.SECURITY_ENABLED.get(settings)) {
             return Map.of(
                 "available",
-                CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE.checkWithoutTracking(licenseState),
+                ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.checkWithoutTracking(licenseState),
                 "enabled",
                 RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED.get(settings)
             );

+ 2 - 2
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java

@@ -128,12 +128,12 @@ final class CrossClusterAccessServerTransportFilter extends ServerTransportFilte
         final TransportRequest request,
         final ActionListener<Authentication> authenticationListener
     ) {
-        if (false == Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE.check(licenseState)) {
+        if (false == Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.check(licenseState)) {
             onFailureWithDebugLog(
                 securityAction,
                 request,
                 authenticationListener,
-                LicenseUtils.newComplianceException(Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE.getName())
+                LicenseUtils.newComplianceException(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName())
             );
         } else if (false == CROSS_CLUSTER_ACCESS_ACTION_ALLOWLIST.contains(securityAction)) {
             onFailureWithDebugLog(

+ 2 - 2
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java

@@ -298,8 +298,8 @@ public class SecurityServerTransportInterceptor implements TransportInterceptor
                 final TransportRequestOptions options,
                 final TransportResponseHandler<T> handler
             ) {
-                if (false == Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE.check(licenseState)) {
-                    throw LicenseUtils.newComplianceException(Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE.getName());
+                if (false == Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.check(licenseState)) {
+                    throw LicenseUtils.newComplianceException(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName());
                 }
                 final String remoteClusterAlias = remoteClusterCredentials.clusterAlias();
 

+ 1 - 1
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java

@@ -103,7 +103,7 @@ public class SecurityInfoTransportActionTests extends ESTestCase {
         final boolean operatorPrivilegesAvailable = randomBoolean();
         when(licenseState.isAllowed(Security.OPERATOR_PRIVILEGES_FEATURE)).thenReturn(operatorPrivilegesAvailable);
         final boolean remoteClusterServerAvailable = randomBoolean();
-        when(licenseState.isAllowed(Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE)).thenReturn(remoteClusterServerAvailable);
+        when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(remoteClusterServerAvailable);
 
         Settings.Builder settings = Settings.builder().put(this.settings);
 

+ 4 - 1
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java

@@ -361,7 +361,7 @@ public class FileRolesStoreTests extends ESTestCase {
         assertThat(roles.get("role_query_fields"), nullValue());
         assertThat(roles.get("role_query_invalid"), nullValue());
 
-        assertThat(events, hasSize(4));
+        assertThat(events, hasSize(TcpTransport.isUntrustedRemoteClusterEnabled() ? 4 : 5));
         assertThat(
             events.get(0),
             startsWith(
@@ -394,6 +394,9 @@ public class FileRolesStoreTests extends ESTestCase {
                     + "]. document and field level security is not enabled."
             )
         );
+        if (false == TcpTransport.isUntrustedRemoteClusterEnabled()) {
+            assertThat(events.get(4), startsWith("failed to parse role [role_remote_indices]. unexpected field [remote_indices]"));
+        }
     }
 
     public void testParseFileWithFLSAndDLSUnlicensed() throws Exception {

+ 5 - 4
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java

@@ -38,6 +38,7 @@ import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TcpTransport;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentType;
@@ -116,7 +117,7 @@ public class NativeRolesStoreTests extends ESTestCase {
             generateRandomStringArray(5, randomIntBetween(2, 8), true, true),
             RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()),
             null,
-            RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2)
+            TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null
         );
         assertFalse(flsRole.getTransientMetadata().containsKey("unlicensed_features"));
 
@@ -131,7 +132,7 @@ public class NativeRolesStoreTests extends ESTestCase {
             generateRandomStringArray(5, randomIntBetween(2, 8), true, true),
             RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()),
             null,
-            RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2)
+            TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null
         );
         assertFalse(dlsRole.getTransientMetadata().containsKey("unlicensed_features"));
 
@@ -151,7 +152,7 @@ public class NativeRolesStoreTests extends ESTestCase {
             generateRandomStringArray(5, randomIntBetween(2, 8), true, true),
             RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()),
             null,
-            RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2)
+            TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null
         );
         assertFalse(flsDlsRole.getTransientMetadata().containsKey("unlicensed_features"));
 
@@ -164,7 +165,7 @@ public class NativeRolesStoreTests extends ESTestCase {
             generateRandomStringArray(5, randomIntBetween(2, 8), false, true),
             RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()),
             null,
-            RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2)
+            TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null
         );
         assertFalse(noFlsDlsRole.getTransientMetadata().containsKey("unlicensed_features"));
 

+ 3 - 3
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java

@@ -126,7 +126,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase {
         threadContext = threadPool.getThreadContext();
         securityContext = spy(new SecurityContext(settings, threadPool.getThreadContext()));
         mockLicenseState = MockLicenseState.createMock();
-        Mockito.when(mockLicenseState.isAllowed(Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE)).thenReturn(true);
+        Mockito.when(mockLicenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(true);
     }
 
     @After
@@ -591,7 +591,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase {
         assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled());
 
         final MockLicenseState unsupportedLicenseState = MockLicenseState.createMock();
-        Mockito.when(unsupportedLicenseState.isAllowed(Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE)).thenReturn(false);
+        Mockito.when(unsupportedLicenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(false);
 
         AuthenticationTestHelper.builder().build().writeToContext(threadContext);
         final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10);
@@ -645,7 +645,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase {
         assertThat(actualException.get().getCause(), instanceOf(ElasticsearchSecurityException.class));
         assertThat(
             actualException.get().getCause().getMessage(),
-            equalTo("current license is non-compliant for [" + Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE.getName() + "]")
+            equalTo("current license is non-compliant for [" + Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName() + "]")
         );
         verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias));
         assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue());

+ 3 - 3
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java

@@ -92,7 +92,7 @@ public class ServerTransportFilterTests extends ESTestCase {
         crossClusterAccessAuthcService = mock(CrossClusterAccessAuthenticationService.class);
         when(crossClusterAccessAuthcService.getAuthenticationService()).thenReturn(authcService);
         mockLicenseState = MockLicenseState.createMock();
-        Mockito.when(mockLicenseState.isAllowed(Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE)).thenReturn(true);
+        Mockito.when(mockLicenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(true);
     }
 
     public void testInbound() {
@@ -340,7 +340,7 @@ public class ServerTransportFilterTests extends ESTestCase {
 
     public void testCrossClusterAccessInboundFailsWithUnsupportedLicense() {
         final MockLicenseState unsupportedLicenseState = MockLicenseState.createMock();
-        Mockito.when(unsupportedLicenseState.isAllowed(Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE)).thenReturn(false);
+        Mockito.when(unsupportedLicenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(false);
 
         ServerTransportFilter crossClusterAccessFilter = getNodeCrossClusterAccessFilter(unsupportedLicenseState);
         PlainActionFuture<Void> listener = new PlainActionFuture<>();
@@ -350,7 +350,7 @@ public class ServerTransportFilterTests extends ESTestCase {
         ElasticsearchSecurityException actualException = expectThrows(ElasticsearchSecurityException.class, listener::actionGet);
         assertThat(
             actualException.getMessage(),
-            equalTo("current license is non-compliant for [" + Security.CONFIGURABLE_CROSS_CLUSTER_ACCESS_FEATURE.getName() + "]")
+            equalTo("current license is non-compliant for [" + Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName() + "]")
         );
 
         // License check should be executed first, hence we don't expect authc/authz to be even attempted.

+ 8 - 0
x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml

@@ -460,6 +460,14 @@ setup:
               { "input": "words" }
             ]
           }
+  - do:
+      ml.get_trained_models_stats:
+        model_id: test_model
+  - match: { count: 1 } # one model matched
+  - match: { trained_model_stats.0.model_id: test_model }
+  - match: { trained_model_stats.0.deployment_stats.deployment_id: test_model_for_ingest }
+  - match: { trained_model_stats.1.model_id: test_model }
+  - match: { trained_model_stats.1.deployment_stats.deployment_id: test_model_for_search }
 
   - do:
       ml.stop_trained_model_deployment:

+ 1 - 1
x-pack/plugin/voting-only-node/src/test/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeCoordinatorTests.java

@@ -38,7 +38,7 @@ public class VotingOnlyNodeCoordinatorTests extends AbstractCoordinatorTestCase
     }
 
     @Override
-    protected CoordinatorStrategy getCoordinatorStrategy() {
+    protected CoordinatorStrategy createCoordinatorStrategy() {
         return new DefaultCoordinatorStrategy(new VotingOnlyNodePlugin.VotingOnlyNodeElectionStrategy());
     }