Browse Source

Merge remote-tracking branch 'public/main' into merge-main

Niels Bauman 8 months ago
parent
commit
682cf0a18f
100 changed files with 1998 additions and 1806 deletions
  1. 2 1
      benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java
  2. 6 0
      docs/changelog/119898.yaml
  3. 5 0
      docs/changelog/120334.yaml
  4. 5 0
      docs/changelog/120547.yaml
  5. 5 0
      docs/changelog/120590.yaml
  6. 5 0
      docs/changelog/120643.yaml
  7. 5 0
      docs/changelog/120662.yaml
  8. 29 8
      docs/reference/indices/resolve-cluster.asciidoc
  9. 1 1
      docs/reference/rest-api/common-parms.asciidoc
  10. 10 9
      docs/reference/search/count.asciidoc
  11. 1 1
      docs/reference/search/search-your-data/search-across-clusters.asciidoc
  12. 12 0
      libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
  13. 2 5
      libs/entitlement/qa/build.gradle
  14. 0 24
      libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle
  15. 0 44
      libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java
  16. 0 9
      libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
  17. 0 25
      libs/entitlement/qa/entitlement-allowed/build.gradle
  18. 0 44
      libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java
  19. 0 9
      libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml
  20. 0 24
      libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle
  21. 0 44
      libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java
  22. 0 5
      libs/entitlement/qa/entitlement-denied-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
  23. 0 25
      libs/entitlement/qa/entitlement-denied/build.gradle
  24. 0 6
      libs/entitlement/qa/entitlement-denied/src/main/java/module-info.java
  25. 0 5
      libs/entitlement/qa/entitlement-denied/src/main/plugin-metadata/entitlement-policy.yaml
  26. 7 14
      libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java
  27. 59 0
      libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedNonModularIT.java
  28. 6 12
      libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java
  29. 61 0
      libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedNonModularIT.java
  30. 72 0
      libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsUtil.java
  31. 12 0
      libs/entitlement/qa/test-plugin/build.gradle
  32. 1 3
      libs/entitlement/qa/test-plugin/src/main/java/module-info.java
  33. 1 1
      libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/DummyImplementations.java
  34. 3 4
      libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/EntitlementTestPlugin.java
  35. 44 0
      libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/LoadNativeLibrariesCheckActions.java
  36. 1 1
      libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/NetworkAccessCheckActions.java
  37. 25 25
      libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/RestEntitlementsCheckAction.java
  38. 1 1
      libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/VersionSpecificNetworkChecks.java
  39. 1 1
      libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/WritePropertiesCheckActions.java
  40. 1 1
      libs/entitlement/qa/test-plugin/src/main18/java/org/elasticsearch/entitlement/qa/test/VersionSpecificNetworkChecks.java
  41. 1 1
      libs/entitlement/qa/test-plugin/src/main21/java/org/elasticsearch/entitlement/qa/test/VersionSpecificNetworkChecks.java
  42. 8 13
      libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
  43. 6 72
      libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
  44. 20 0
      libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
  45. 8 5
      libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/LoadNativeLibrariesEntitlement.java
  46. 7 0
      libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
  47. 28 4
      libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
  48. 95 0
      libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserUtils.java
  49. 43 0
      libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java
  50. 6 6
      modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java
  51. 50 4
      modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java
  52. 54 17
      modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java
  53. 1 0
      modules/apm/src/main/plugin-metadata/entitlement-policy.yaml
  54. 9 29
      muted-tests.yml
  55. 1 1
      plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2EnvironmentVariableCredentialsIT.java
  56. 1 1
      plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2KeystoreCredentialsIT.java
  57. 1 1
      plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2KeystoreSessionCredentialsIT.java
  58. 1 1
      plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2SystemPropertyCredentialsIT.java
  59. 1 1
      rest-api-spec/src/main/resources/rest-api-spec/api/indices.cancel_migrate_reindex.json
  60. 1 1
      rest-api-spec/src/main/resources/rest-api-spec/api/indices.create_from.json
  61. 1 1
      rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_migrate_reindex_status.json
  62. 1 1
      rest-api-spec/src/main/resources/rest-api-spec/api/indices.migrate_reindex.json
  63. 106 1
      server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java
  64. 1 0
      server/src/main/java/org/elasticsearch/TransportVersions.java
  65. 52 10
      server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java
  66. 10 10
      server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterInfo.java
  67. 113 26
      server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java
  68. 9 7
      server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
  69. 13 3
      server/src/main/java/org/elasticsearch/common/settings/Setting.java
  70. 0 9
      server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java
  71. 42 28
      server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java
  72. 34 3
      server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResolveClusterAction.java
  73. 26 3
      server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
  74. 17 20
      server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequestTests.java
  75. 2 1
      server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java
  76. 2 6
      server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java
  77. 8 0
      server/src/test/java/org/elasticsearch/common/settings/SettingTests.java
  78. 6 1
      server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java
  79. 5 1
      server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
  80. 14 8
      test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java
  81. 2 2
      test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java
  82. 5 5
      test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/MappingGenerator.java
  83. 5 0
      x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/LocalCircuitBreaker.java
  84. 40 9
      x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/QueryList.java
  85. 197 125
      x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperatorTests.java
  86. 1 2
      x-pack/plugin/esql/compute/test/src/main/java/org/elasticsearch/compute/test/OperatorTestCase.java
  87. 149 99
      x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec
  88. 141 522
      x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java
  89. 7 10
      x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java
  90. 2 1
      x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java
  91. 5 0
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java
  92. 6 6
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java
  93. 2 2
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java
  94. 5 2
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java
  95. 3 3
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java
  96. 1 1
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
  97. 1 1
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java
  98. 96 89
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeHandler.java
  99. 15 225
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java
  100. 128 54
      x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java

+ 2 - 1
benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java

@@ -60,6 +60,7 @@ import java.io.IOException;
 import java.nio.file.Path;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -77,7 +78,7 @@ public class ScriptScoreBenchmark {
     private final PluginsService pluginsService = new PluginsService(
         Settings.EMPTY,
         null,
-        PluginsLoader.createPluginsLoader(null, Path.of(System.getProperty("plugins.dir")))
+        PluginsLoader.createPluginsLoader(Set.of(), PluginsLoader.loadPluginsBundles(Path.of(System.getProperty("plugins.dir"))))
     );
     private final ScriptModule scriptModule = new ScriptModule(Settings.EMPTY, pluginsService.filterPlugins(ScriptPlugin.class).toList());
 

+ 6 - 0
docs/changelog/119898.yaml

@@ -0,0 +1,6 @@
+pr: 119898
+summary: Resolve/cluster allows querying for cluster info only (no index expression
+  required)
+area: CCS
+type: enhancement
+issues: []

+ 5 - 0
docs/changelog/120334.yaml

@@ -0,0 +1,5 @@
+pr: 120334
+summary: Introduce `IndexSettingDeprecatedInV8AndRemovedInV9` Setting property
+area: Infra/Settings
+type: enhancement
+issues: []

+ 5 - 0
docs/changelog/120547.yaml

@@ -0,0 +1,5 @@
+pr: 120547
+summary: Consistent mapping for OTel log and event bodies
+area: Data streams
+type: enhancement
+issues: []

+ 5 - 0
docs/changelog/120590.yaml

@@ -0,0 +1,5 @@
+pr: 120590
+summary: Map `scope.name` as a dimension
+area: Data streams
+type: bug
+issues: []

+ 5 - 0
docs/changelog/120643.yaml

@@ -0,0 +1,5 @@
+pr: 120643
+summary: Remove index blocks by default in `create_from`
+area: Indices APIs
+type: enhancement
+issues: []

+ 5 - 0
docs/changelog/120662.yaml

@@ -0,0 +1,5 @@
+pr: 120662
+summary: Fix broken yaml test `30_create_from`
+area: Indices APIs
+type: bug
+issues: []

+ 29 - 8
docs/reference/indices/resolve-cluster.asciidoc

@@ -11,7 +11,9 @@ For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[In
 --
 
 Resolves the specified index expressions to return information about
-each cluster, including the local cluster, if included.
+each cluster, including the local "querying" cluster, if included. If no index expression
+is provided, this endpoint will return information about all the remote
+clusters that are configured on the querying cluster.
 
 This endpoint is useful before doing a <<modules-cross-cluster-search,{ccs}>> in
 order to determine which remote clusters should be included in a search.
@@ -20,14 +22,13 @@ You use the same index expression with this endpoint as you would for cross-clus
 search. Index and <<exclude-problematic-clusters,cluster exclusions>> are also supported
 with this endpoint.
 
-For each cluster in the index expression, information is returned about:
+For each cluster in scope, information is returned about:
 
-1. whether the querying ("local") cluster is currently connected to each remote cluster
-   in the index expression scope
+1. whether the querying ("local") cluster is currently connected to it
 2. whether each remote cluster is configured with `skip_unavailable` as `true` or `false`
 3. whether there are any indices, aliases or data streams on that cluster that match
-   the index expression
-4. whether the search is likely to have errors returned when you do the {ccs} (including any
+   the index expression (if one is provided)
+4. whether the search is likely to have errors returned when you do a {ccs} (including any
    authorization errors if your user does not have permission to query a remote cluster or
    the indices on that cluster)
 5. (in some cases) cluster version information, including the Elasticsearch server version
@@ -41,6 +42,11 @@ Once the proper security permissions are obtained, then you can rely on the `con
 in the response to determine whether the remote cluster is available and ready for querying.
 ====
 
+NOTE: When querying older clusters that do not support the _resolve/cluster endpoint
+without an index expression, the local cluster will send the index expression `dummy*`
+to those remote clusters, so if an error occurs, you may see a reference to that index
+expression even though you didn't request it. If this causes a problem, you can instead
+provide an index expression like `*:*` to this endpoint to bypass the issue.
 
 ////
 [source,console]
@@ -71,14 +77,22 @@ PUT _cluster/settings
 // TEST[s/35.238.149.\d+:930\d+/\${transport_host}/]
 ////
 
+[source,console]
+----
+GET /_resolve/cluster
+----
+// TEST[continued]
+
+Returns information about all remote clusters configured on the local cluster.
+
 [source,console]
 ----
 GET /_resolve/cluster/my-index-*,cluster*:my-index-*
 ----
 // TEST[continued]
 
-This will return information about the local cluster and all remotely configured
-clusters that start with the alias `cluster*`. Each cluster will return information
+Returns information about the local cluster and all remote clusters that
+start with the alias `cluster*`. Each cluster will return information
 about whether it has any indices, aliases or data streams that match `my-index-*`.
 
 [[resolve-cluster-api-request]]
@@ -126,6 +140,13 @@ ignored when frozen. Defaults to `false`.
 +
 deprecated:[7.16.0]
 
+[TIP]
+====
+The index options above are only allowed when specifying an index expression.
+You will get an error if you specify index options in a request to the
+_resolve/cluster API that includes no index expression.
+====
+
 
 [discrete]
 [[usecases-for-resolve-cluster]]

+ 1 - 1
docs/reference/rest-api/common-parms.asciidoc

@@ -875,7 +875,7 @@ end::search-q[]
 
 tag::query[]
 `query`::
-(Optional, <<query-dsl,query object>>) Defines the search definition using the
+(Optional, <<query-dsl,query object>>) Defines the search query using
 <<query-dsl,Query DSL>>.
 end::query[]
 

+ 10 - 9
docs/reference/search/count.asciidoc

@@ -18,10 +18,6 @@ GET /my-index-000001/_count?q=user:kimchy
 --------------------------------------------------
 // TEST[setup:my_index]
 
-NOTE: The query being sent in the body must be nested in a `query` key, same as
-the <<search-search,search API>> works.
-
-
 [[search-count-api-request]]
 ==== {api-request-title}
 
@@ -39,9 +35,12 @@ or alias.
 ==== {api-description-title}
 
 The count API allows you to execute a query and get the number of matches for
-that query. The query can either
-be provided using a simple query string as a parameter, or using the
-<<query-dsl,Query DSL>> defined within the request body.
+that query. You can provide the query by either:
+
+* Including the <<search-count-api-query-params,`q` query string parameter>> 
+* Defining <<query-dsl,Query DSL>> within the <<search-count-request-body,request body>> 
+
+Using both returns an error.
 
 The count API supports <<api-multi-index,multi-target syntax>>. You can run a single
 count API search across multiple data streams and indices.
@@ -92,6 +91,8 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=lenient]
 include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=preference]
 
 include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=search-q]
++
+NOTE: If the `q` parameter is specified, the count API does not accept a <<search-count-request-body,request body>>.
 
 include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing]
 
@@ -140,5 +141,5 @@ Both examples above do the same: count the number of documents in
 }
 --------------------------------------------------
 
-The query is optional, and when not provided, it will use `match_all` to
-count all the docs.
+The query is optional. When no query is provided, the API uses <<query-dsl-match-all-query,`match_all`>> to
+count all the documents.

+ 1 - 1
docs/reference/search/search-your-data/search-across-clusters.asciidoc

@@ -109,7 +109,7 @@ PUT _cluster/settings
 // end::ccs-remote-cluster-setup[]
 
 <1> Since `skip_unavailable` was not set on `cluster_three`, it uses
-the default of `false`. See the <<skip-unavailable-clusters>>
+the default of `true`. See the <<skip-unavailable-clusters>>
 section for details.
 
 

+ 12 - 0
libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java

@@ -398,4 +398,16 @@ public interface EntitlementChecker {
     void check$sun_nio_ch_DatagramChannelImpl$send(Class<?> callerClass, DatagramChannel that, ByteBuffer src, SocketAddress target);
 
     void check$sun_nio_ch_DatagramChannelImpl$receive(Class<?> callerClass, DatagramChannel that, ByteBuffer dst);
+
+    ////////////////////
+    //
+    // Load native libraries
+    //
+    void check$java_lang_Runtime$load(Class<?> callerClass, Runtime that, String filename);
+
+    void check$java_lang_Runtime$loadLibrary(Class<?> callerClass, Runtime that, String libname);
+
+    void check$java_lang_System$$load(Class<?> callerClass, String filename);
+
+    void check$java_lang_System$$loadLibrary(Class<?> callerClass, String libname);
 }

+ 2 - 5
libs/entitlement/qa/build.gradle

@@ -12,9 +12,6 @@ apply plugin: 'elasticsearch.internal-java-rest-test'
 apply plugin: 'elasticsearch.internal-test-artifact'
 
 dependencies {
-  javaRestTestImplementation project(':libs:entitlement:qa:common')
-  clusterModules project(':libs:entitlement:qa:entitlement-allowed')
-  clusterModules project(':libs:entitlement:qa:entitlement-allowed-nonmodular')
-  clusterPlugins project(':libs:entitlement:qa:entitlement-denied')
-  clusterPlugins project(':libs:entitlement:qa:entitlement-denied-nonmodular')
+  javaRestTestImplementation project(':libs:entitlement:qa:test-plugin')
+  clusterModules project(':libs:entitlement:qa:test-plugin')
 }

+ 0 - 24
libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle

@@ -1,24 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-apply plugin: 'elasticsearch.base-internal-es-plugin'
-
-esplugin {
-  name = 'entitlement-allowed-nonmodular'
-  description = 'A non-modular test module that invokes entitlement checks that are supposed to be granted'
-  classname = 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementAllowedNonModularPlugin'
-}
-
-dependencies {
-  implementation project(':libs:entitlement:qa:common')
-}
-
-tasks.named("javadoc").configure {
-  enabled = false
-}

+ 0 - 44
libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java

@@ -1,44 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-package org.elasticsearch.entitlement.qa.nonmodular;
-
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.IndexScopedSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsFilter;
-import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction;
-import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.plugins.ActionPlugin;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.rest.RestController;
-import org.elasticsearch.rest.RestHandler;
-
-import java.util.List;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-
-public class EntitlementAllowedNonModularPlugin extends Plugin implements ActionPlugin {
-    @Override
-    public List<RestHandler> getRestHandlers(
-        final Settings settings,
-        NamedWriteableRegistry namedWriteableRegistry,
-        final RestController restController,
-        final ClusterSettings clusterSettings,
-        final IndexScopedSettings indexScopedSettings,
-        final SettingsFilter settingsFilter,
-        final IndexNameExpressionResolver indexNameExpressionResolver,
-        final Supplier<DiscoveryNodes> nodesInCluster,
-        Predicate<NodeFeature> clusterSupportsFeature
-    ) {
-        return List.of(new RestEntitlementsCheckAction("allowed_nonmodular"));
-    }
-}

+ 0 - 9
libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml

@@ -1,9 +0,0 @@
-ALL-UNNAMED:
-  - create_class_loader
-  - set_https_connection_properties
-  - inbound_network
-  - outbound_network
-  - write_system_properties:
-      properties:
-        - es.entitlements.checkSetSystemProperty
-        - es.entitlements.checkClearSystemProperty

+ 0 - 25
libs/entitlement/qa/entitlement-allowed/build.gradle

@@ -1,25 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-apply plugin: 'elasticsearch.base-internal-es-plugin'
-
-esplugin {
-  name = 'entitlement-allowed'
-  description = 'A test module that invokes entitlement checks that are supposed to be granted'
-  classname = 'org.elasticsearch.entitlement.qa.EntitlementAllowedPlugin'
-}
-
-dependencies {
-  implementation project(':libs:entitlement:qa:common')
-}
-
-tasks.named("javadoc").configure {
-  enabled = false
-}
-

+ 0 - 44
libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java

@@ -1,44 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-package org.elasticsearch.entitlement.qa;
-
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.IndexScopedSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsFilter;
-import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction;
-import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.plugins.ActionPlugin;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.rest.RestController;
-import org.elasticsearch.rest.RestHandler;
-
-import java.util.List;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-
-public class EntitlementAllowedPlugin extends Plugin implements ActionPlugin {
-    @Override
-    public List<RestHandler> getRestHandlers(
-        final Settings settings,
-        NamedWriteableRegistry namedWriteableRegistry,
-        final RestController restController,
-        final ClusterSettings clusterSettings,
-        final IndexScopedSettings indexScopedSettings,
-        final SettingsFilter settingsFilter,
-        final IndexNameExpressionResolver indexNameExpressionResolver,
-        final Supplier<DiscoveryNodes> nodesInCluster,
-        Predicate<NodeFeature> clusterSupportsFeature
-    ) {
-        return List.of(new RestEntitlementsCheckAction("allowed"));
-    }
-}

+ 0 - 9
libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml

@@ -1,9 +0,0 @@
-org.elasticsearch.entitlement.qa.common:
-  - create_class_loader
-  - set_https_connection_properties
-  - inbound_network
-  - outbound_network
-  - write_system_properties:
-      properties:
-        - es.entitlements.checkSetSystemProperty
-        - es.entitlements.checkClearSystemProperty

+ 0 - 24
libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle

@@ -1,24 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-apply plugin: 'elasticsearch.base-internal-es-plugin'
-
-esplugin {
-  name = 'entitlement-denied-nonmodular'
-  description = 'A non-modular test module that invokes non-granted entitlement and triggers exceptions'
-  classname = 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementDeniedNonModularPlugin'
-}
-
-dependencies {
-  implementation project(':libs:entitlement:qa:common')
-}
-
-tasks.named("javadoc").configure {
-  enabled = false
-}

+ 0 - 44
libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java

@@ -1,44 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-package org.elasticsearch.entitlement.qa.nonmodular;
-
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.IndexScopedSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsFilter;
-import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction;
-import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.plugins.ActionPlugin;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.rest.RestController;
-import org.elasticsearch.rest.RestHandler;
-
-import java.util.List;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-
-public class EntitlementDeniedNonModularPlugin extends Plugin implements ActionPlugin {
-    @Override
-    public List<RestHandler> getRestHandlers(
-        final Settings settings,
-        NamedWriteableRegistry namedWriteableRegistry,
-        final RestController restController,
-        final ClusterSettings clusterSettings,
-        final IndexScopedSettings indexScopedSettings,
-        final SettingsFilter settingsFilter,
-        final IndexNameExpressionResolver indexNameExpressionResolver,
-        final Supplier<DiscoveryNodes> nodesInCluster,
-        Predicate<NodeFeature> clusterSupportsFeature
-    ) {
-        return List.of(new RestEntitlementsCheckAction("denied_nonmodular"));
-    }
-}

+ 0 - 5
libs/entitlement/qa/entitlement-denied-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml

@@ -1,5 +0,0 @@
-ALL-UNNAMED:
-  - write_system_properties:
-      properties:
-        # entitlement itself not sufficient, also no wildcard support
-        - "*"

+ 0 - 25
libs/entitlement/qa/entitlement-denied/build.gradle

@@ -1,25 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-apply plugin: 'elasticsearch.base-internal-es-plugin'
-
-esplugin {
-  name = 'entitlement-denied'
-  description = 'A test module that invokes non-granted entitlement and triggers exceptions'
-  classname = 'org.elasticsearch.entitlement.qa.EntitlementDeniedPlugin'
-}
-
-dependencies {
-  implementation project(':libs:entitlement:qa:common')
-}
-
-tasks.named("javadoc").configure {
-  enabled = false
-}
-

+ 0 - 6
libs/entitlement/qa/entitlement-denied/src/main/java/module-info.java

@@ -1,6 +0,0 @@
-module org.elasticsearch.entitlement.qa.denied {
-    requires org.elasticsearch.server;
-    requires org.elasticsearch.base;
-    requires org.apache.logging.log4j;
-    requires org.elasticsearch.entitlement.qa.common;
-}

+ 0 - 5
libs/entitlement/qa/entitlement-denied/src/main/plugin-metadata/entitlement-policy.yaml

@@ -1,5 +0,0 @@
-org.elasticsearch.entitlement.qa.common:
-  - write_system_properties:
-      properties:
-        # entitlement itself not sufficient, also no wildcard support
-        - "*"

+ 7 - 14
libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java

@@ -14,41 +14,34 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
-import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction;
+import org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.ClassRule;
 
 import java.io.IOException;
-import java.util.stream.Stream;
 
+import static org.elasticsearch.entitlement.qa.EntitlementsUtil.ALLOWED_ENTITLEMENTS;
 import static org.hamcrest.Matchers.equalTo;
 
 public class EntitlementsAllowedIT extends ESRestTestCase {
 
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
-        .module("entitlement-allowed")
-        .module("entitlement-allowed-nonmodular")
+        .module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, true, ALLOWED_ENTITLEMENTS))
         .systemProperty("es.entitlements.enabled", "true")
         .setting("xpack.security.enabled", "false")
         .build();
 
-    private final String pathPrefix;
     private final String actionName;
 
-    public EntitlementsAllowedIT(@Name("pathPrefix") String pathPrefix, @Name("actionName") String actionName) {
-        this.pathPrefix = pathPrefix;
+    public EntitlementsAllowedIT(@Name("actionName") String actionName) {
         this.actionName = actionName;
     }
 
     @ParametersFactory
     public static Iterable<Object[]> data() {
-        return Stream.of("allowed", "allowed_nonmodular")
-            .flatMap(
-                path -> RestEntitlementsCheckAction.getCheckActionsAllowedInPlugins().stream().map(action -> new Object[] { path, action })
-            )
-            .toList();
+        return RestEntitlementsCheckAction.getCheckActionsAllowedInPlugins().stream().map(action -> new Object[] { action }).toList();
     }
 
     @Override
@@ -57,8 +50,8 @@ public class EntitlementsAllowedIT extends ESRestTestCase {
     }
 
     public void testCheckActionWithPolicyPass() throws IOException {
-        logger.info("Executing Entitlement test [{}] for [{}]", pathPrefix, actionName);
-        var request = new Request("GET", "/_entitlement/" + pathPrefix + "/_check");
+        logger.info("Executing Entitlement test for [{}]", actionName);
+        var request = new Request("GET", "/_entitlement_check");
         request.addParameter("action", actionName);
         Response result = client().performRequest(request);
         assertThat(result.getStatusLine().getStatusCode(), equalTo(200));

+ 59 - 0
libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedNonModularIT.java

@@ -0,0 +1,59 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.junit.ClassRule;
+
+import java.io.IOException;
+
+import static org.elasticsearch.entitlement.qa.EntitlementsUtil.ALLOWED_ENTITLEMENTS;
+import static org.hamcrest.Matchers.equalTo;
+
+public class EntitlementsAllowedNonModularIT extends ESRestTestCase {
+
+    @ClassRule
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, false, ALLOWED_ENTITLEMENTS))
+        .systemProperty("es.entitlements.enabled", "true")
+        .setting("xpack.security.enabled", "false")
+        .build();
+
+    private final String actionName;
+
+    public EntitlementsAllowedNonModularIT(@Name("actionName") String actionName) {
+        this.actionName = actionName;
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> data() {
+        return RestEntitlementsCheckAction.getCheckActionsAllowedInPlugins().stream().map(action -> new Object[] { action }).toList();
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
+    public void testCheckActionWithPolicyPass() throws IOException {
+        logger.info("Executing Entitlement test for [{}]", actionName);
+        var request = new Request("GET", "/_entitlement_check");
+        request.addParameter("action", actionName);
+        Response result = client().performRequest(request);
+        assertThat(result.getStatusLine().getStatusCode(), equalTo(200));
+    }
+}

+ 6 - 12
libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java

@@ -13,13 +13,12 @@ import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.client.Request;
-import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction;
+import org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.ClassRule;
 
 import java.io.IOException;
-import java.util.stream.Stream;
 
 import static org.hamcrest.Matchers.containsString;
 
@@ -27,8 +26,7 @@ public class EntitlementsDeniedIT extends ESRestTestCase {
 
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
-        .plugin("entitlement-denied")
-        .plugin("entitlement-denied-nonmodular")
+        .module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, true, null))
         .systemProperty("es.entitlements.enabled", "true")
         .setting("xpack.security.enabled", "false")
         // Logs in libs/entitlement/qa/build/test-results/javaRestTest/TEST-org.elasticsearch.entitlement.qa.EntitlementsDeniedIT.xml
@@ -40,25 +38,21 @@ public class EntitlementsDeniedIT extends ESRestTestCase {
         return cluster.getHttpAddresses();
     }
 
-    private final String pathPrefix;
     private final String actionName;
 
-    public EntitlementsDeniedIT(@Name("pathPrefix") String pathPrefix, @Name("actionName") String actionName) {
-        this.pathPrefix = pathPrefix;
+    public EntitlementsDeniedIT(@Name("actionName") String actionName) {
         this.actionName = actionName;
     }
 
     @ParametersFactory
     public static Iterable<Object[]> data() {
-        return Stream.of("denied", "denied_nonmodular")
-            .flatMap(path -> RestEntitlementsCheckAction.getAllCheckActions().stream().map(action -> new Object[] { path, action }))
-            .toList();
+        return RestEntitlementsCheckAction.getAllCheckActions().stream().map(action -> new Object[] { action }).toList();
     }
 
     public void testCheckThrows() {
-        logger.info("Executing Entitlement test [{}] for [{}]", pathPrefix, actionName);
+        logger.info("Executing Entitlement test for [{}]", actionName);
         var exception = expectThrows(IOException.class, () -> {
-            var request = new Request("GET", "/_entitlement/" + pathPrefix + "/_check");
+            var request = new Request("GET", "/_entitlement_check");
             request.addParameter("action", actionName);
             client().performRequest(request);
         });

+ 61 - 0
libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedNonModularIT.java

@@ -0,0 +1,61 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.junit.ClassRule;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+
+public class EntitlementsDeniedNonModularIT extends ESRestTestCase {
+
+    @ClassRule
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, false, null))
+        .systemProperty("es.entitlements.enabled", "true")
+        .setting("xpack.security.enabled", "false")
+        // Logs in libs/entitlement/qa/build/test-results/javaRestTest/TEST-org.elasticsearch.entitlement.qa.EntitlementsDeniedIT.xml
+        // .setting("logger.org.elasticsearch.entitlement", "DEBUG")
+        .build();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
+    private final String actionName;
+
+    public EntitlementsDeniedNonModularIT(@Name("actionName") String actionName) {
+        this.actionName = actionName;
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> data() {
+        return RestEntitlementsCheckAction.getAllCheckActions().stream().map(action -> new Object[] { action }).toList();
+    }
+
+    public void testCheckThrows() {
+        logger.info("Executing Entitlement test for [{}]", actionName);
+        var exception = expectThrows(IOException.class, () -> {
+            var request = new Request("GET", "/_entitlement_check");
+            request.addParameter("action", actionName);
+            client().performRequest(request);
+        });
+        assertThat(exception.getMessage(), containsString("not_entitled_exception"));
+    }
+}

+ 72 - 0
libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsUtil.java

@@ -0,0 +1,72 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.core.CheckedConsumer;
+import org.elasticsearch.test.cluster.local.PluginInstallSpec;
+import org.elasticsearch.test.cluster.util.resource.Resource;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.yaml.YamlXContent;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.List;
+import java.util.Map;
+
+class EntitlementsUtil {
+
+    static final CheckedConsumer<XContentBuilder, IOException> ALLOWED_ENTITLEMENTS = builder -> {
+        builder.value("create_class_loader");
+        builder.value("set_https_connection_properties");
+        builder.value("inbound_network");
+        builder.value("outbound_network");
+        builder.value("load_native_libraries");
+        builder.value(
+            Map.of(
+                "write_system_properties",
+                Map.of("properties", List.of("es.entitlements.checkSetSystemProperty", "es.entitlements.checkClearSystemProperty"))
+            )
+        );
+    };
+
+    static void setupEntitlements(PluginInstallSpec spec, boolean modular, CheckedConsumer<XContentBuilder, IOException> policyBuilder) {
+        String moduleName = modular ? "org.elasticsearch.entitlement.qa.test" : "ALL-UNNAMED";
+        if (policyBuilder != null) {
+            try {
+                try (var builder = YamlXContent.contentBuilder()) {
+                    builder.startObject();
+                    builder.field(moduleName);
+                    builder.startArray();
+                    policyBuilder.accept(builder);
+                    builder.endArray();
+                    builder.endObject();
+
+                    String policy = Strings.toString(builder);
+                    System.out.println("Using entitlement policy:\n" + policy);
+                    spec.withEntitlementsOverride(old -> Resource.fromString(policy));
+                }
+
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        }
+
+        if (modular == false) {
+            spec.withPropertiesOverride(old -> {
+                String props = old.replace("modulename=org.elasticsearch.entitlement.qa.test", "");
+                System.out.println("Using plugin properties:\n" + props);
+                return Resource.fromString(props);
+            });
+        }
+    }
+
+    private EntitlementsUtil() {}
+}

+ 12 - 0
libs/entitlement/qa/common/build.gradle → libs/entitlement/qa/test-plugin/build.gradle

@@ -9,14 +9,26 @@
 
 import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
 
+apply plugin: 'elasticsearch.base-internal-es-plugin'
 apply plugin: 'elasticsearch.build'
 apply plugin: 'elasticsearch.mrjar'
 
+esplugin {
+  name = 'test-plugin'
+  description = 'A test plugin that invokes methods checked by entitlements'
+  classname = 'org.elasticsearch.entitlement.qa.test.EntitlementTestPlugin'
+}
+
 dependencies {
   implementation project(':server')
   implementation project(':libs:logging')
 }
 
+tasks.named("javadoc").configure {
+  enabled = false
+}
+
 tasks.withType(CheckForbiddenApisTask).configureEach {
   replaceSignatureFiles 'jdk-signatures'
 }
+

+ 1 - 3
libs/entitlement/qa/common/src/main/java/module-info.java → libs/entitlement/qa/test-plugin/src/main/java/module-info.java

@@ -7,7 +7,7 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-module org.elasticsearch.entitlement.qa.common {
+module org.elasticsearch.entitlement.qa.test {
     requires org.elasticsearch.server;
     requires org.elasticsearch.base;
     requires org.elasticsearch.logging;
@@ -15,6 +15,4 @@ module org.elasticsearch.entitlement.qa.common {
     // Modules we'll attempt to use in order to exercise entitlements
     requires java.logging;
     requires java.net.http;
-
-    exports org.elasticsearch.entitlement.qa.common;
 }

+ 1 - 1
libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java → libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/DummyImplementations.java

@@ -7,7 +7,7 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-package org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.qa.test;
 
 import java.io.IOException;
 import java.io.InputStream;

+ 3 - 4
libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java → libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/EntitlementTestPlugin.java

@@ -6,7 +6,7 @@
  * your election, the "Elastic License 2.0", the "GNU Affero General Public
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
-package org.elasticsearch.entitlement.qa;
+package org.elasticsearch.entitlement.qa.test;
 
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -15,7 +15,6 @@ import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
-import org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction;
 import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -26,7 +25,7 @@ import java.util.List;
 import java.util.function.Predicate;
 import java.util.function.Supplier;
 
-public class EntitlementDeniedPlugin extends Plugin implements ActionPlugin {
+public class EntitlementTestPlugin extends Plugin implements ActionPlugin {
     @Override
     public List<RestHandler> getRestHandlers(
         final Settings settings,
@@ -39,6 +38,6 @@ public class EntitlementDeniedPlugin extends Plugin implements ActionPlugin {
         final Supplier<DiscoveryNodes> nodesInCluster,
         Predicate<NodeFeature> clusterSupportsFeature
     ) {
-        return List.of(new RestEntitlementsCheckAction("denied"));
+        return List.of(new RestEntitlementsCheckAction());
     }
 }

+ 44 - 0
libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/LoadNativeLibrariesCheckActions.java

@@ -0,0 +1,44 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa.test;
+
+class LoadNativeLibrariesCheckActions {
+    static void runtimeLoad() {
+        try {
+            Runtime.getRuntime().load("libSomeLibFile.so");
+        } catch (UnsatisfiedLinkError ignored) {
+            // The library does not exist, so we expect to fail loading it
+        }
+    }
+
+    static void systemLoad() {
+        try {
+            System.load("libSomeLibFile.so");
+        } catch (UnsatisfiedLinkError ignored) {
+            // The library does not exist, so we expect to fail loading it
+        }
+    }
+
+    static void runtimeLoadLibrary() {
+        try {
+            Runtime.getRuntime().loadLibrary("SomeLib");
+        } catch (UnsatisfiedLinkError ignored) {
+            // The library does not exist, so we expect to fail loading it
+        }
+    }
+
+    static void systemLoadLibrary() {
+        try {
+            System.loadLibrary("SomeLib");
+        } catch (UnsatisfiedLinkError ignored) {
+            // The library does not exist, so we expect to fail loading it
+        }
+    }
+}

+ 1 - 1
libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/NetworkAccessCheckActions.java → libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/NetworkAccessCheckActions.java

@@ -7,7 +7,7 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-package org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.qa.test;
 
 import org.elasticsearch.core.SuppressForbidden;
 

+ 25 - 25
libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java → libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/RestEntitlementsCheckAction.java

@@ -7,24 +7,24 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-package org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.qa.test;
 
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.core.CheckedRunnable;
 import org.elasticsearch.core.SuppressForbidden;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyBreakIteratorProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarDataProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarNameProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCollatorProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCurrencyNameProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDateFormatProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDateFormatSymbolsProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDecimalFormatSymbolsProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyLocaleNameProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyLocaleServiceProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyNumberFormatProvider;
-import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyTimeZoneNameProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyBreakIteratorProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyCalendarDataProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyCalendarNameProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyCollatorProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyCurrencyNameProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyDateFormatProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyDateFormatSymbolsProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyDecimalFormatSymbolsProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyLocaleNameProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyLocaleServiceProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyNumberFormatProvider;
+import org.elasticsearch.entitlement.qa.test.DummyImplementations.DummyTimeZoneNameProvider;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -61,16 +61,15 @@ import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLContext;
 
 import static java.util.Map.entry;
-import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.alwaysDenied;
-import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.deniedToPlugins;
-import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.forPlugins;
+import static org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction.CheckAction.alwaysDenied;
+import static org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction.CheckAction.deniedToPlugins;
+import static org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction.CheckAction.forPlugins;
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 
 @SuppressWarnings("unused")
 public class RestEntitlementsCheckAction extends BaseRestHandler {
     private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class);
     public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing");
-    private final String prefix;
 
     record CheckAction(CheckedRunnable<Exception> action, boolean isAlwaysDeniedToPlugins, Integer fromJavaVersion) {
         /**
@@ -192,7 +191,12 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
         entry("datagram_channel_bind", forPlugins(NetworkAccessCheckActions::datagramChannelBind)),
         entry("datagram_channel_connect", forPlugins(NetworkAccessCheckActions::datagramChannelConnect)),
         entry("datagram_channel_send", forPlugins(NetworkAccessCheckActions::datagramChannelSend)),
-        entry("datagram_channel_receive", forPlugins(NetworkAccessCheckActions::datagramChannelReceive))
+        entry("datagram_channel_receive", forPlugins(NetworkAccessCheckActions::datagramChannelReceive)),
+
+        entry("runtime_load", forPlugins(LoadNativeLibrariesCheckActions::runtimeLoad)),
+        entry("runtime_load_library", forPlugins(LoadNativeLibrariesCheckActions::runtimeLoadLibrary)),
+        entry("system_load", forPlugins(LoadNativeLibrariesCheckActions::systemLoad)),
+        entry("system_load_library", forPlugins(LoadNativeLibrariesCheckActions::systemLoadLibrary))
     )
         .filter(entry -> entry.getValue().fromJavaVersion() == null || Runtime.version().feature() >= entry.getValue().fromJavaVersion())
         .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
@@ -437,10 +441,6 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
         }
     }
 
-    public RestEntitlementsCheckAction(String prefix) {
-        this.prefix = prefix;
-    }
-
     public static Set<String> getCheckActionsAllowedInPlugins() {
         return checkActions.entrySet()
             .stream()
@@ -455,17 +455,17 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 
     @Override
     public List<Route> routes() {
-        return List.of(new Route(GET, "/_entitlement/" + prefix + "/_check"));
+        return List.of(new Route(GET, "/_entitlement_check"));
     }
 
     @Override
     public String getName() {
-        return "check_" + prefix + "_action";
+        return "check_entitlement_action";
     }
 
     @Override
     protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
-        logger.info("RestEntitlementsCheckAction rest handler [{}]", request.path());
+        logger.debug("RestEntitlementsCheckAction rest handler [{}]", request.path());
         var actionName = request.param("action");
         if (Strings.isNullOrEmpty(actionName)) {
             throw new IllegalArgumentException("Missing action parameter");

+ 1 - 1
libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java → libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/VersionSpecificNetworkChecks.java

@@ -7,7 +7,7 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-package org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.qa.test;
 
 import java.io.IOException;
 import java.net.URI;

+ 1 - 1
libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/WritePropertiesCheckActions.java → libs/entitlement/qa/test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/WritePropertiesCheckActions.java

@@ -7,7 +7,7 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-package org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.qa.test;
 
 import org.elasticsearch.core.SuppressForbidden;
 

+ 1 - 1
libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java → libs/entitlement/qa/test-plugin/src/main18/java/org/elasticsearch/entitlement/qa/test/VersionSpecificNetworkChecks.java

@@ -7,7 +7,7 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-package org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.qa.test;
 
 import java.io.IOException;
 import java.net.URI;

+ 1 - 1
libs/entitlement/qa/common/src/main21/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java → libs/entitlement/qa/test-plugin/src/main21/java/org/elasticsearch/entitlement/qa/test/VersionSpecificNetworkChecks.java

@@ -7,7 +7,7 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-package org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.qa.test;
 
 import java.io.IOException;
 import java.net.URI;

+ 8 - 13
libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java

@@ -17,32 +17,27 @@ import com.sun.tools.attach.VirtualMachine;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.entitlement.initialization.EntitlementInitialization;
 import org.elasticsearch.entitlement.runtime.api.NotEntitledException;
+import org.elasticsearch.entitlement.runtime.policy.Policy;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
 
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.util.Collection;
+import java.util.Map;
 import java.util.function.Function;
 
 import static java.util.Objects.requireNonNull;
 
 public class EntitlementBootstrap {
 
-    public record BootstrapArgs(Collection<PluginData> pluginData, Function<Class<?>, String> pluginResolver) {
+    public record BootstrapArgs(Map<String, Policy> pluginPolicies, Function<Class<?>, String> pluginResolver) {
         public BootstrapArgs {
-            requireNonNull(pluginData);
+            requireNonNull(pluginPolicies);
             requireNonNull(pluginResolver);
         }
     }
 
-    public record PluginData(Path pluginPath, boolean isModular, boolean isExternalPlugin) {
-        public PluginData {
-            requireNonNull(pluginPath);
-        }
-    }
-
     private static BootstrapArgs bootstrapArgs;
 
     public static BootstrapArgs bootstrapArgs() {
@@ -52,16 +47,16 @@ public class EntitlementBootstrap {
     /**
      * Activates entitlement checking. Once this method returns, calls to methods protected by Entitlements from classes without a valid
      * policy will throw {@link org.elasticsearch.entitlement.runtime.api.NotEntitledException}.
-     * @param pluginData a collection of (plugin path, boolean, boolean), that holds the paths of all the installed Elasticsearch modules
-     *                   and plugins, whether they are Java modular or not, and whether they are Elasticsearch modules or external plugins.
+     *
+     * @param pluginPolicies a map holding policies for plugins (and modules), by plugin (or module) name.
      * @param pluginResolver a functor to map a Java Class to the plugin it belongs to (the plugin name).
      */
-    public static void bootstrap(Collection<PluginData> pluginData, Function<Class<?>, String> pluginResolver) {
+    public static void bootstrap(Map<String, Policy> pluginPolicies, Function<Class<?>, String> pluginResolver) {
         logger.debug("Loading entitlement agent");
         if (EntitlementBootstrap.bootstrapArgs != null) {
             throw new IllegalStateException("plugin data is already set");
         }
-        EntitlementBootstrap.bootstrapArgs = new BootstrapArgs(pluginData, pluginResolver);
+        EntitlementBootstrap.bootstrapArgs = new BootstrapArgs(pluginPolicies, pluginResolver);
         exportInitializationToAgent();
         loadAgent(findAgentJar());
         selfTest();

+ 6 - 72
libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java

@@ -9,7 +9,6 @@
 
 package org.elasticsearch.entitlement.initialization;
 
-import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.internal.provider.ProviderLocator;
 import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap;
 import org.elasticsearch.entitlement.bridge.EntitlementChecker;
@@ -23,31 +22,21 @@ import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement
 import org.elasticsearch.entitlement.runtime.policy.Entitlement;
 import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement;
 import org.elasticsearch.entitlement.runtime.policy.InboundNetworkEntitlement;
+import org.elasticsearch.entitlement.runtime.policy.LoadNativeLibrariesEntitlement;
 import org.elasticsearch.entitlement.runtime.policy.OutboundNetworkEntitlement;
 import org.elasticsearch.entitlement.runtime.policy.Policy;
 import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
-import org.elasticsearch.entitlement.runtime.policy.PolicyParser;
 import org.elasticsearch.entitlement.runtime.policy.Scope;
 
-import java.io.IOException;
 import java.lang.instrument.Instrumentation;
-import java.lang.module.ModuleFinder;
-import java.lang.module.ModuleReference;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
-import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED;
-
 /**
  * Called by the agent during {@code agentmain} to configure the entitlement system,
  * instantiate and configure an {@link EntitlementChecker},
@@ -57,7 +46,6 @@ import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNN
  */
 public class EntitlementInitialization {
 
-    private static final String POLICY_FILE_NAME = "entitlement-policy.yaml";
     private static final Module ENTITLEMENTS_MODULE = PolicyManager.class.getModule();
 
     private static ElasticsearchEntitlementChecker manager;
@@ -90,8 +78,8 @@ public class EntitlementInitialization {
         return retransform.toArray(new Class<?>[0]);
     }
 
-    private static PolicyManager createPolicyManager() throws IOException {
-        Map<String, Policy> pluginPolicies = createPluginPolicies(EntitlementBootstrap.bootstrapArgs().pluginData());
+    private static PolicyManager createPolicyManager() {
+        Map<String, Policy> pluginPolicies = EntitlementBootstrap.bootstrapArgs().pluginPolicies();
 
         // TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it
         var serverPolicy = new Policy(
@@ -105,7 +93,8 @@ public class EntitlementInitialization {
                         new ExitVMEntitlement(),
                         new CreateClassLoaderEntitlement(),
                         new InboundNetworkEntitlement(),
-                        new OutboundNetworkEntitlement()
+                        new OutboundNetworkEntitlement(),
+                        new LoadNativeLibrariesEntitlement()
                     )
                 ),
                 new Scope("org.apache.httpcomponents.httpclient", List.of(new OutboundNetworkEntitlement())),
@@ -119,62 +108,7 @@ public class EntitlementInitialization {
         return new PolicyManager(serverPolicy, agentEntitlements, pluginPolicies, resolver, ENTITLEMENTS_MODULE);
     }
 
-    private static Map<String, Policy> createPluginPolicies(Collection<EntitlementBootstrap.PluginData> pluginData) throws IOException {
-        Map<String, Policy> pluginPolicies = new HashMap<>(pluginData.size());
-        for (var entry : pluginData) {
-            Path pluginRoot = entry.pluginPath();
-            String pluginName = pluginRoot.getFileName().toString();
-
-            final Policy policy = loadPluginPolicy(pluginRoot, entry.isModular(), pluginName, entry.isExternalPlugin());
-
-            pluginPolicies.put(pluginName, policy);
-        }
-        return pluginPolicies;
-    }
-
-    private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, String pluginName, boolean isExternalPlugin)
-        throws IOException {
-        Path policyFile = pluginRoot.resolve(POLICY_FILE_NAME);
-
-        final Set<String> moduleNames = getModuleNames(pluginRoot, isModular);
-        final Policy policy = parsePolicyIfExists(pluginName, policyFile, isExternalPlugin);
-
-        // TODO: should this check actually be part of the parser?
-        for (Scope scope : policy.scopes()) {
-            if (moduleNames.contains(scope.moduleName()) == false) {
-                throw new IllegalStateException(
-                    Strings.format(
-                        "Invalid module name in policy: plugin [%s] does not have module [%s]; available modules [%s]; policy file [%s]",
-                        pluginName,
-                        scope.moduleName(),
-                        String.join(", ", moduleNames),
-                        policyFile
-                    )
-                );
-            }
-        }
-        return policy;
-    }
-
-    private static Policy parsePolicyIfExists(String pluginName, Path policyFile, boolean isExternalPlugin) throws IOException {
-        if (Files.exists(policyFile)) {
-            return new PolicyParser(Files.newInputStream(policyFile, StandardOpenOption.READ), pluginName, isExternalPlugin).parsePolicy();
-        }
-        return new Policy(pluginName, List.of());
-    }
-
-    private static Set<String> getModuleNames(Path pluginRoot, boolean isModular) {
-        if (isModular) {
-            ModuleFinder moduleFinder = ModuleFinder.of(pluginRoot);
-            Set<ModuleReference> moduleReferences = moduleFinder.findAll();
-
-            return moduleReferences.stream().map(mr -> mr.descriptor().name()).collect(Collectors.toUnmodifiableSet());
-        }
-        // When isModular == false we use the same "ALL-UNNAMED" constant as the JDK to indicate (any) unnamed module for this plugin
-        return Set.of(ALL_UNNAMED);
-    }
-
-    private static ElasticsearchEntitlementChecker initChecker() throws IOException {
+    private static ElasticsearchEntitlementChecker initChecker() {
         final PolicyManager policyManager = createPolicyManager();
 
         int javaVersion = Runtime.version().feature();

+ 20 - 0
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java

@@ -732,4 +732,24 @@ public class ElasticsearchEntitlementChecker implements EntitlementChecker {
     public void check$sun_nio_ch_DatagramChannelImpl$receive(Class<?> callerClass, DatagramChannel that, ByteBuffer dst) {
         policyManager.checkInboundNetworkAccess(callerClass);
     }
+
+    @Override
+    public void check$java_lang_Runtime$load(Class<?> callerClass, Runtime that, String filename) {
+        policyManager.checkLoadingNativeLibraries(callerClass);
+    }
+
+    @Override
+    public void check$java_lang_Runtime$loadLibrary(Class<?> callerClass, Runtime that, String libname) {
+        policyManager.checkLoadingNativeLibraries(callerClass);
+    }
+
+    @Override
+    public void check$java_lang_System$$load(Class<?> callerClass, String filename) {
+        policyManager.checkLoadingNativeLibraries(callerClass);
+    }
+
+    @Override
+    public void check$java_lang_System$$loadLibrary(Class<?> callerClass, String libname) {
+        policyManager.checkLoadingNativeLibraries(callerClass);
+    }
 }

+ 8 - 5
libs/entitlement/qa/entitlement-allowed/src/main/java/module-info.java → libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/LoadNativeLibrariesEntitlement.java

@@ -7,9 +7,12 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-module org.elasticsearch.entitlement.qa.allowed {
-    requires org.elasticsearch.server;
-    requires org.elasticsearch.base;
-    requires org.elasticsearch.logging;
-    requires org.elasticsearch.entitlement.qa.common;
+package org.elasticsearch.entitlement.runtime.policy;
+
+/**
+ * An Entitlement to allow loading native libraries
+ */
+public record LoadNativeLibrariesEntitlement() implements Entitlement {
+    @ExternalEntitlement(esModulesOnly = false)
+    public LoadNativeLibrariesEntitlement {}
 }

+ 7 - 0
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java

@@ -189,6 +189,13 @@ public class PolicyManager {
         neverEntitled(callerClass, "access sensitive network information");
     }
 
+    /**
+     * Check for operations that load native libraries, e.g. via {@code System.loadLibrary} or {@code Runtime.load}
+     */
+    public void checkLoadingNativeLibraries(Class<?> callerClass) {
+        checkEntitlementPresent(callerClass, LoadNativeLibrariesEntitlement.class);
+    }
+
     private String operationDescription(String methodName) {
         // TODO: Use a more human-readable description. Perhaps share code with InstrumentationServiceImpl.parseCheckerMethodName
         return methodName.substring(methodName.indexOf('$'));

+ 28 - 4
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java

@@ -41,12 +41,14 @@ public class PolicyParser {
         SetHttpsConnectionPropertiesEntitlement.class,
         OutboundNetworkEntitlement.class,
         InboundNetworkEntitlement.class,
-        WriteSystemPropertiesEntitlement.class
+        WriteSystemPropertiesEntitlement.class,
+        LoadNativeLibrariesEntitlement.class
     ).collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity()));
 
     protected final XContentParser policyParser;
     protected final String policyName;
     private final boolean isExternalPlugin;
+    private final Map<String, Class<?>> externalEntitlements;
 
     static String getEntitlementTypeName(Class<? extends Entitlement> entitlementClass) {
         var entitlementClassName = entitlementClass.getSimpleName();
@@ -65,9 +67,16 @@ public class PolicyParser {
     }
 
     public PolicyParser(InputStream inputStream, String policyName, boolean isExternalPlugin) throws IOException {
+        this(inputStream, policyName, isExternalPlugin, EXTERNAL_ENTITLEMENTS);
+    }
+
+    // package private for tests
+    PolicyParser(InputStream inputStream, String policyName, boolean isExternalPlugin, Map<String, Class<?>> externalEntitlements)
+        throws IOException {
         this.policyParser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, Objects.requireNonNull(inputStream));
         this.policyName = policyName;
         this.isExternalPlugin = isExternalPlugin;
+        this.externalEntitlements = externalEntitlements;
     }
 
     public Policy parsePolicy() {
@@ -123,14 +132,29 @@ public class PolicyParser {
 
     protected Entitlement parseEntitlement(String scopeName, String entitlementType) throws IOException {
         XContentLocation startLocation = policyParser.getTokenLocation();
-        Class<?> entitlementClass = EXTERNAL_ENTITLEMENTS.get(entitlementType);
+        Class<?> entitlementClass = externalEntitlements.get(entitlementType);
 
         if (entitlementClass == null) {
             throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]");
         }
 
-        Constructor<?> entitlementConstructor = entitlementClass.getConstructors()[0];
-        ExternalEntitlement entitlementMetadata = entitlementConstructor.getAnnotation(ExternalEntitlement.class);
+        Constructor<?> entitlementConstructor = null;
+        ExternalEntitlement entitlementMetadata = null;
+        for (var ctor : entitlementClass.getConstructors()) {
+            var metadata = ctor.getAnnotation(ExternalEntitlement.class);
+            if (metadata != null) {
+                if (entitlementMetadata != null) {
+                    throw new IllegalStateException(
+                        "entitlement class ["
+                            + entitlementClass.getName()
+                            + "] has more than one constructor annotated with ExternalEntitlement"
+                    );
+                }
+                entitlementConstructor = ctor;
+                entitlementMetadata = metadata;
+            }
+
+        }
         if (entitlementMetadata == null) {
             throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]");
         }

+ 95 - 0
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserUtils.java

@@ -0,0 +1,95 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.core.Strings;
+
+import java.io.IOException;
+import java.lang.module.ModuleFinder;
+import java.lang.module.ModuleReference;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.util.Objects.requireNonNull;
+import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED;
+
+public class PolicyParserUtils {
+
+    public record PluginData(Path pluginPath, boolean isModular, boolean isExternalPlugin) {
+        public PluginData {
+            requireNonNull(pluginPath);
+        }
+    }
+
+    private static final String POLICY_FILE_NAME = "entitlement-policy.yaml";
+
+    public static Map<String, Policy> createPluginPolicies(Collection<PluginData> pluginData) throws IOException {
+        Map<String, Policy> pluginPolicies = new HashMap<>(pluginData.size());
+        for (var entry : pluginData) {
+            Path pluginRoot = entry.pluginPath();
+            String pluginName = pluginRoot.getFileName().toString();
+
+            final Policy policy = loadPluginPolicy(pluginRoot, entry.isModular(), pluginName, entry.isExternalPlugin());
+
+            pluginPolicies.put(pluginName, policy);
+        }
+        return pluginPolicies;
+    }
+
+    private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, String pluginName, boolean isExternalPlugin)
+        throws IOException {
+        Path policyFile = pluginRoot.resolve(POLICY_FILE_NAME);
+
+        final Set<String> moduleNames = getModuleNames(pluginRoot, isModular);
+        final Policy policy = parsePolicyIfExists(pluginName, policyFile, isExternalPlugin);
+
+        // TODO: should this check actually be part of the parser?
+        for (Scope scope : policy.scopes()) {
+            if (moduleNames.contains(scope.moduleName()) == false) {
+                throw new IllegalStateException(
+                    Strings.format(
+                        "Invalid module name in policy: plugin [%s] does not have module [%s]; available modules [%s]; policy file [%s]",
+                        pluginName,
+                        scope.moduleName(),
+                        String.join(", ", moduleNames),
+                        policyFile
+                    )
+                );
+            }
+        }
+        return policy;
+    }
+
+    private static Policy parsePolicyIfExists(String pluginName, Path policyFile, boolean isExternalPlugin) throws IOException {
+        if (Files.exists(policyFile)) {
+            return new PolicyParser(Files.newInputStream(policyFile, StandardOpenOption.READ), pluginName, isExternalPlugin).parsePolicy();
+        }
+        return new Policy(pluginName, List.of());
+    }
+
+    private static Set<String> getModuleNames(Path pluginRoot, boolean isModular) {
+        if (isModular) {
+            ModuleFinder moduleFinder = ModuleFinder.of(pluginRoot);
+            Set<ModuleReference> moduleReferences = moduleFinder.findAll();
+
+            return moduleReferences.stream().map(mr -> mr.descriptor().name()).collect(Collectors.toUnmodifiableSet());
+        }
+        // When isModular == false we use the same "ALL-UNNAMED" constant as the JDK to indicate (any) unnamed module for this plugin
+        return Set.of(ALL_UNNAMED);
+    }
+
+}

+ 43 - 0
libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java

@@ -15,6 +15,7 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import static org.hamcrest.Matchers.equalTo;
@@ -23,6 +24,14 @@ public class PolicyParserTests extends ESTestCase {
 
     private static class TestWrongEntitlementName implements Entitlement {}
 
+    public static class ManyConstructorsEntitlement implements Entitlement {
+        @ExternalEntitlement
+        public ManyConstructorsEntitlement(String s) {}
+
+        @ExternalEntitlement
+        public ManyConstructorsEntitlement(int i) {}
+    }
+
     public void testGetEntitlementTypeName() {
         assertEquals("create_class_loader", PolicyParser.getEntitlementTypeName(CreateClassLoaderEntitlement.class));
 
@@ -123,4 +132,38 @@ public class PolicyParserTests extends ESTestCase {
         );
         assertEquals(expected, parsedPolicy);
     }
+
+    public void testParseLoadNativeLibraries() throws IOException {
+        Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream("""
+            entitlement-module-name:
+              - load_native_libraries
+            """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", true).parsePolicy();
+        Policy expected = new Policy(
+            "test-policy.yaml",
+            List.of(new Scope("entitlement-module-name", List.of(new LoadNativeLibrariesEntitlement())))
+        );
+        assertEquals(expected, parsedPolicy);
+    }
+
+    public void testMultipleConstructorsAnnotated() throws IOException {
+        var parser = new PolicyParser(
+            new ByteArrayInputStream("""
+                entitlement-module-name:
+                  - many_constructors
+                """.getBytes(StandardCharsets.UTF_8)),
+            "test-policy.yaml",
+            true,
+            Map.of("many_constructors", ManyConstructorsEntitlement.class)
+        );
+
+        var e = expectThrows(IllegalStateException.class, parser::parsePolicy);
+        assertThat(
+            e.getMessage(),
+            equalTo(
+                "entitlement class "
+                    + "[org.elasticsearch.entitlement.runtime.policy.PolicyParserTests$ManyConstructorsEntitlement]"
+                    + " has more than one constructor annotated with ExternalEntitlement"
+            )
+        );
+    }
 }

+ 6 - 6
modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java

@@ -58,7 +58,7 @@ public class CommonAnalysisPluginTests extends ESTestCase {
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
             .put(
                 IndexMetadata.SETTING_VERSION_CREATED,
-                IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0)
+                IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.V_7_6_0)
             )
             .put("index.analysis.analyzer.custom_analyzer.type", "custom")
             .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard")
@@ -107,7 +107,7 @@ public class CommonAnalysisPluginTests extends ESTestCase {
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
             .put(
                 IndexMetadata.SETTING_VERSION_CREATED,
-                IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0)
+                IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.V_7_6_0)
             )
             .put("index.analysis.analyzer.custom_analyzer.type", "custom")
             .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard")
@@ -133,13 +133,13 @@ public class CommonAnalysisPluginTests extends ESTestCase {
         doTestPrebuiltTokenizerDeprecation(
             "nGram",
             "ngram",
-            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2),
+            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.V_7_5_2),
             false
         );
         doTestPrebuiltTokenizerDeprecation(
             "edgeNGram",
             "edge_ngram",
-            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2),
+            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.V_7_5_2),
             false
         );
         doTestPrebuiltTokenizerDeprecation(
@@ -185,13 +185,13 @@ public class CommonAnalysisPluginTests extends ESTestCase {
         doTestCustomTokenizerDeprecation(
             "nGram",
             "ngram",
-            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2),
+            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.V_7_5_2),
             false
         );
         doTestCustomTokenizerDeprecation(
             "edgeNGram",
             "edge_ngram",
-            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2),
+            IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.V_7_5_2),
             false
         );
         doTestCustomTokenizerDeprecation(

+ 50 - 4
modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java

@@ -18,12 +18,14 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService.IndexCreationContext;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.analysis.IndexAnalyzers;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.indices.analysis.AnalysisModule;
 import org.elasticsearch.plugins.scanners.StablePluginsRegistry;
 import org.elasticsearch.test.ESTokenStreamTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.test.index.IndexVersionUtils;
 
 import java.io.IOException;
 import java.io.StringReader;
@@ -49,12 +51,56 @@ public class EdgeNGramTokenizerTests extends ESTokenStreamTestCase {
     }
 
     public void testPreConfiguredTokenizer() throws IOException {
-        try (IndexAnalyzers indexAnalyzers = buildAnalyzers(IndexVersion.current(), "edge_ngram")) {
-            NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
-            assertNotNull(analyzer);
-            assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" });
+        // Before 7.3 we return ngrams of length 1 only
+        {
+            IndexVersion version = IndexVersionUtils.randomVersionBetween(
+                random(),
+                IndexVersions.MINIMUM_READONLY_COMPATIBLE,
+                IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0)
+            );
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[] { "t" });
+            }
         }
 
+        // Check deprecated name as well
+        {
+            IndexVersion version = IndexVersionUtils.randomVersionBetween(
+                random(),
+                IndexVersions.MINIMUM_READONLY_COMPATIBLE,
+                IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0)
+            );
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edgeNGram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[] { "t" });
+            }
+        }
+
+        // Afterwards, we return ngrams of length 1 and 2, to match the default factory settings
+        {
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(IndexVersion.current(), "edge_ngram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" });
+            }
+        }
+
+        // Check deprecated name as well, needs version before 8.0 because throws IAE after that
+        {
+            IndexVersion version = IndexVersionUtils.randomVersionBetween(
+                random(),
+                IndexVersions.V_7_3_0,
+                IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)
+            );
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" });
+            }
+        }
     }
 
     public void testCustomTokenChars() throws IOException {

+ 54 - 17
modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java

@@ -17,6 +17,7 @@ import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.index.IndexService.IndexCreationContext;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.analysis.AnalysisTestsHelper;
 import org.elasticsearch.index.analysis.IndexAnalyzers;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -25,6 +26,7 @@ import org.elasticsearch.indices.analysis.AnalysisModule;
 import org.elasticsearch.plugins.scanners.StablePluginsRegistry;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.test.index.IndexVersionUtils;
 
 import java.io.IOException;
 import java.io.StringReader;
@@ -182,26 +184,61 @@ public class WordDelimiterGraphTokenFilterFactoryTests extends BaseWordDelimiter
     }
 
     public void testPreconfiguredFilter() throws IOException {
-        Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
-        Settings indexSettings = Settings.builder()
-            .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
-            .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
-            .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph")
-            .build();
-        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+        // Before 7.3 we don't adjust offsets
+        {
+            Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
+            Settings indexSettings = Settings.builder()
+                .put(
+                    IndexMetadata.SETTING_VERSION_CREATED,
+                    IndexVersionUtils.randomVersionBetween(
+                        random(),
+                        IndexVersions.MINIMUM_READONLY_COMPATIBLE,
+                        IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0)
+                    )
+                )
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
+                .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph")
+                .build();
+            IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+
+            try (
+                IndexAnalyzers indexAnalyzers = new AnalysisModule(
+                    TestEnvironment.newEnvironment(settings),
+                    Collections.singletonList(new CommonAnalysisPlugin()),
+                    new StablePluginsRegistry()
+                ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings)
+            ) {
+
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 0 }, new int[] { 4, 4 });
+
+            }
+        }
+
+        // After 7.3 we do adjust offsets
+        {
+            Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
+            Settings indexSettings = Settings.builder()
+                .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
+                .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph")
+                .build();
+            IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
 
-        try (
-            IndexAnalyzers indexAnalyzers = new AnalysisModule(
-                TestEnvironment.newEnvironment(settings),
-                Collections.singletonList(new CommonAnalysisPlugin()),
-                new StablePluginsRegistry()
-            ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings)
-        ) {
+            try (
+                IndexAnalyzers indexAnalyzers = new AnalysisModule(
+                    TestEnvironment.newEnvironment(settings),
+                    Collections.singletonList(new CommonAnalysisPlugin()),
+                    new StablePluginsRegistry()
+                ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings)
+            ) {
 
-            NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
-            assertNotNull(analyzer);
-            assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 1 }, new int[] { 1, 4 });
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 1 }, new int[] { 1, 4 });
 
+            }
         }
     }
 }

+ 1 - 0
modules/apm/src/main/plugin-metadata/entitlement-policy.yaml

@@ -88,3 +88,4 @@ elastic.apm.agent:
   - write_system_properties:
       properties:
         - AsyncProfiler.safemode
+  - load_native_libraries

+ 9 - 29
muted-tests.yml

@@ -222,43 +222,23 @@ tests:
 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests
   method: testInvalidJSON
   issue: https://github.com/elastic/elasticsearch/issues/120482
-- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
-  issue: https://github.com/elastic/elasticsearch/issues/120497
 - class: org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeToCharProcessorTests
   issue: https://github.com/elastic/elasticsearch/issues/120575
-- class: org.elasticsearch.discovery.ec2.DiscoveryEc2KeystoreCredentialsIT
-  issue: https://github.com/elastic/elasticsearch/issues/120576
-- class: org.elasticsearch.discovery.ec2.DiscoveryEc2KeystoreSessionCredentialsIT
-  issue: https://github.com/elastic/elasticsearch/issues/120577
-- class: org.elasticsearch.discovery.ec2.DiscoveryEc2SystemPropertyCredentialsIT
-  issue: https://github.com/elastic/elasticsearch/issues/120578
-- class: org.elasticsearch.discovery.ec2.DiscoveryEc2EnvironmentVariableCredentialsIT
-  issue: https://github.com/elastic/elasticsearch/issues/120579
 - class: org.elasticsearch.action.search.SearchProgressActionListenerIT
   method: testSearchProgressWithHitsAndAggs
   issue: https://github.com/elastic/elasticsearch/issues/120583
-- class: org.elasticsearch.xpack.esql.heap_attack.HeapAttackIT
-  method: testEnrichExplosionManyMatches
-  issue: https://github.com/elastic/elasticsearch/issues/120587
-- class: org.elasticsearch.lucene.FullClusterRestartLuceneIndexCompatibilityIT
-  issue: https://github.com/elastic/elasticsearch/issues/120597
-- class: org.elasticsearch.lucene.RollingUpgradeLuceneIndexCompatibilityTestCase
-  issue: https://github.com/elastic/elasticsearch/issues/120598
-- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
-  method: test {lookup-join.NonUniqueRightKeyOnTheCoordinator ASYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/120600
-- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
-  method: test {lookup-join.NonUniqueRightKeyFromRow ASYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/120601
-- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
-  method: test {lookup-join.NonUniqueRightKeyFromRow SYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/120602
-- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
-  method: test {lookup-join.NonUniqueRightKeyOnTheCoordinator SYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/120603
 - class: org.elasticsearch.index.reindex.BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests
   method: testReindex
   issue: https://github.com/elastic/elasticsearch/issues/120605
+- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT
+  method: testMultipleInferencesTriggeringDownloadAndDeploy
+  issue: https://github.com/elastic/elasticsearch/issues/120668
+- class: org.elasticsearch.entitlement.qa.EntitlementsAllowedIT
+  issue: https://github.com/elastic/elasticsearch/issues/120674
+- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedNonModularIT
+  issue: https://github.com/elastic/elasticsearch/issues/120675
+- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT
+  issue: https://github.com/elastic/elasticsearch/issues/120676
 
 # Examples:
 #

+ 1 - 1
plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2EnvironmentVariableCredentialsIT.java

@@ -41,7 +41,7 @@ public class DiscoveryEc2EnvironmentVariableCredentialsIT extends DiscoveryEc2Cl
         .setting(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), ec2ApiFixture::getAddress)
         .environment("AWS_REGION", REGION)
         .environment("AWS_ACCESS_KEY_ID", ACCESS_KEY)
-        .environment("AWS_SECRET_ACCESS_KEY", ESTestCase::randomIdentifier)
+        .environment("AWS_SECRET_ACCESS_KEY", ESTestCase::randomSecretKey)
         .build();
 
     private static List<String> getAvailableTransportEndpoints() {

+ 1 - 1
plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2KeystoreCredentialsIT.java

@@ -41,7 +41,7 @@ public class DiscoveryEc2KeystoreCredentialsIT extends DiscoveryEc2ClusterFormat
         .setting(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), ec2ApiFixture::getAddress)
         .environment("AWS_REGION", REGION)
         .keystore("discovery.ec2.access_key", ACCESS_KEY)
-        .keystore("discovery.ec2.secret_key", ESTestCase::randomIdentifier)
+        .keystore("discovery.ec2.secret_key", ESTestCase::randomSecretKey)
         .build();
 
     private static List<String> getAvailableTransportEndpoints() {

+ 1 - 1
plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2KeystoreSessionCredentialsIT.java

@@ -42,7 +42,7 @@ public class DiscoveryEc2KeystoreSessionCredentialsIT extends DiscoveryEc2Cluste
         .setting(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), ec2ApiFixture::getAddress)
         .environment("AWS_REGION", REGION)
         .keystore("discovery.ec2.access_key", ACCESS_KEY)
-        .keystore("discovery.ec2.secret_key", ESTestCase::randomIdentifier)
+        .keystore("discovery.ec2.secret_key", ESTestCase::randomSecretKey)
         .keystore("discovery.ec2.session_token", SESSION_TOKEN)
         .build();
 

+ 1 - 1
plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2SystemPropertyCredentialsIT.java

@@ -41,7 +41,7 @@ public class DiscoveryEc2SystemPropertyCredentialsIT extends DiscoveryEc2Cluster
         .setting(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), ec2ApiFixture::getAddress)
         .environment("AWS_REGION", REGION)
         .systemProperty("aws.accessKeyId", ACCESS_KEY)
-        .systemProperty("aws.secretKey", ESTestCase::randomIdentifier)
+        .systemProperty("aws.secretKey", ESTestCase::randomSecretKey)
         .build();
 
     private static List<String> getAvailableTransportEndpoints() {

+ 1 - 1
rest-api-spec/src/main/resources/rest-api-spec/api/migrate.cancel_reindex.json → rest-api-spec/src/main/resources/rest-api-spec/api/indices.cancel_migrate_reindex.json

@@ -1,5 +1,5 @@
 {
-  "migrate.cancel_reindex":{
+  "indices.cancel_migrate_reindex":{
     "documentation":{
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html",
      "description":"This API cancels a migration reindex attempt for a data stream or index"

+ 1 - 1
rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json → rest-api-spec/src/main/resources/rest-api-spec/api/indices.create_from.json

@@ -1,5 +1,5 @@
 {
-  "migrate.create_from":{
+  "indices.create_from":{
     "documentation":{
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html",
       "description":"This API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values."

+ 1 - 1
rest-api-spec/src/main/resources/rest-api-spec/api/migrate.get_reindex_status.json → rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_migrate_reindex_status.json

@@ -1,5 +1,5 @@
 {
-  "migrate.get_reindex_status":{
+  "indices.get_migrate_reindex_status":{
     "documentation":{
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html",
       "description":"This API returns the status of a migration reindex attempt for a data stream or index"

+ 1 - 1
rest-api-spec/src/main/resources/rest-api-spec/api/migrate.reindex.json → rest-api-spec/src/main/resources/rest-api-spec/api/indices.migrate_reindex.json

@@ -1,5 +1,5 @@
 {
-  "migrate.reindex":{
+  "indices.migrate_reindex":{
     "documentation":{
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html",
       "description":"This API reindexes all legacy backing indices for a data stream. It does this in a persistent task. The persistent task id is returned immediately, and the reindexing work is completed in that task"

+ 106 - 1
server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java

@@ -16,6 +16,7 @@ import org.elasticsearch.action.admin.indices.resolve.ResolveClusterActionReques
 import org.elasticsearch.action.admin.indices.resolve.ResolveClusterActionResponse;
 import org.elasticsearch.action.admin.indices.resolve.ResolveClusterInfo;
 import org.elasticsearch.action.admin.indices.resolve.TransportResolveClusterAction;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.Strings;
@@ -405,6 +406,52 @@ public class ResolveClusterIT extends AbstractMultiClustersTestCase {
         }
     }
 
+    // corresponds to the GET _resolve/cluster endpoint with no index expression specified
+    public void testClusterResolveWithNoIndexExpression() throws IOException {
+        Map<String, Object> testClusterInfo = setupThreeClusters(false);
+        boolean skipUnavailable1 = (Boolean) testClusterInfo.get("remote1.skip_unavailable");
+        boolean skipUnavailable2 = true;
+
+        {
+            String[] noIndexSpecified = new String[0];
+            boolean clusterInfoOnly = true;
+            boolean runningOnQueryingCluster = true;
+            ResolveClusterActionRequest request = new ResolveClusterActionRequest(
+                noIndexSpecified,
+                IndicesOptions.DEFAULT,
+                clusterInfoOnly,
+                runningOnQueryingCluster
+            );
+
+            ActionFuture<ResolveClusterActionResponse> future = client(LOCAL_CLUSTER).admin()
+                .indices()
+                .execute(TransportResolveClusterAction.TYPE, request);
+            ResolveClusterActionResponse response = future.actionGet(30, TimeUnit.SECONDS);
+            assertNotNull(response);
+
+            Map<String, ResolveClusterInfo> clusterInfo = response.getResolveClusterInfo();
+            assertEquals(2, clusterInfo.size());
+
+            // only remote clusters should be present (not local)
+            Set<String> expectedClusterNames = Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2);
+            assertThat(clusterInfo.keySet(), equalTo(expectedClusterNames));
+
+            ResolveClusterInfo remote1 = clusterInfo.get(REMOTE_CLUSTER_1);
+            assertThat(remote1.isConnected(), equalTo(true));
+            assertThat(remote1.getSkipUnavailable(), equalTo(skipUnavailable1));
+            assertThat(remote1.getMatchingIndices(), equalTo(null));  // should not be set
+            assertNotNull(remote1.getBuild().version());
+            assertNull(remote1.getError());
+
+            ResolveClusterInfo remote2 = clusterInfo.get(REMOTE_CLUSTER_2);
+            assertThat(remote2.isConnected(), equalTo(true));
+            assertThat(remote2.getSkipUnavailable(), equalTo(skipUnavailable2));
+            assertThat(remote2.getMatchingIndices(), equalTo(null));  // should not be set
+            assertNotNull(remote2.getBuild().version());
+            assertNull(remote2.getError());
+        }
+    }
+
     public void testClusterResolveWithMatchingAliases() throws IOException {
         Map<String, Object> testClusterInfo = setupThreeClusters(true);
         String localAlias = (String) testClusterInfo.get("local.alias");
@@ -522,6 +569,24 @@ public class ResolveClusterIT extends AbstractMultiClustersTestCase {
         }
     }
 
+    public void testClusterResolveWithNoMatchingClustersReturnsEmptyResult() throws Exception {
+        setupThreeClusters(false);
+        {
+            String[] indexExpressions = new String[] { "no_matching_cluster*:foo" };
+            ResolveClusterActionRequest request = new ResolveClusterActionRequest(indexExpressions);
+
+            ActionFuture<ResolveClusterActionResponse> future = client(LOCAL_CLUSTER).admin()
+                .indices()
+                .execute(TransportResolveClusterAction.TYPE, request);
+            ResolveClusterActionResponse response = future.actionGet(10, TimeUnit.SECONDS);
+            assertNotNull(response);
+
+            Map<String, ResolveClusterInfo> clusterInfo = response.getResolveClusterInfo();
+            assertEquals(0, clusterInfo.size());
+            assertThat(Strings.toString(response), equalTo("{}"));
+        }
+    }
+
     public void testClusterResolveDisconnectedAndErrorScenarios() throws Exception {
         Map<String, Object> testClusterInfo = setupThreeClusters(false);
         String localIndex = (String) testClusterInfo.get("local.index");
@@ -615,9 +680,49 @@ public class ResolveClusterIT extends AbstractMultiClustersTestCase {
             assertNotNull(local.getBuild().version());
             assertNull(local.getError());
         }
+
+        // cluster1 was stopped/disconnected, so it should return a connected:false response when querying with no index expression,
+        // corresponding to GET _resolve/cluster endpoint
+        {
+            String[] noIndexSpecified = new String[0];
+            boolean clusterInfoOnly = true;
+            boolean runningOnQueryingCluster = true;
+            ResolveClusterActionRequest request = new ResolveClusterActionRequest(
+                noIndexSpecified,
+                IndicesOptions.DEFAULT,
+                clusterInfoOnly,
+                runningOnQueryingCluster
+            );
+
+            ActionFuture<ResolveClusterActionResponse> future = client(LOCAL_CLUSTER).admin()
+                .indices()
+                .execute(TransportResolveClusterAction.TYPE, request);
+            ResolveClusterActionResponse response = future.actionGet(30, TimeUnit.SECONDS);
+            assertNotNull(response);
+
+            Map<String, ResolveClusterInfo> clusterInfo = response.getResolveClusterInfo();
+            assertEquals(2, clusterInfo.size());
+            // local cluster is not present when querying without an index expression
+            Set<String> expectedClusterNames = Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2);
+            assertThat(clusterInfo.keySet(), equalTo(expectedClusterNames));
+
+            ResolveClusterInfo remote1 = clusterInfo.get(REMOTE_CLUSTER_1);
+            assertThat(remote1.isConnected(), equalTo(false));
+            assertThat(remote1.getSkipUnavailable(), equalTo(skipUnavailable1));
+            assertNull(remote1.getMatchingIndices());
+            assertNull(remote1.getBuild());
+            assertNull(remote1.getError());
+
+            ResolveClusterInfo remote2 = clusterInfo.get(REMOTE_CLUSTER_2);
+            assertThat(remote2.isConnected(), equalTo(true));
+            assertThat(remote2.getSkipUnavailable(), equalTo(skipUnavailable2));
+            assertNull(remote2.getMatchingIndices());  // not present when no index expression specified
+            assertNotNull(remote2.getBuild().version());
+            assertNull(remote2.getError());
+        }
     }
 
-    private Map<String, Object> setupThreeClusters(boolean useAlias) throws IOException {
+    private Map<String, Object> setupThreeClusters(boolean useAlias) {
         String localAlias = randomAlphaOfLengthBetween(5, 25);
         String remoteAlias1 = randomAlphaOfLengthBetween(5, 25);
         String remoteAlias2 = randomAlphaOfLengthBetween(5, 25);

+ 1 - 0
server/src/main/java/org/elasticsearch/TransportVersions.java

@@ -161,6 +161,7 @@ public class TransportVersions {
     public static final TransportVersion REVERT_BYTE_SIZE_VALUE_ALWAYS_USES_BYTES_1 = def(8_826_00_0);
     public static final TransportVersion ESQL_SKIP_ES_INDEX_SERIALIZATION = def(8_827_00_0);
     public static final TransportVersion ADD_INDEX_BLOCK_TWO_PHASE = def(8_828_00_0);
+    public static final TransportVersion RESOLVE_CLUSTER_NO_INDEX_EXPRESSION = def(8_829_00_0);
 
     /*
      * WARNING: DO NOT MERGE INTO MAIN!

+ 52 - 10
server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java

@@ -14,7 +14,6 @@ import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
-import org.elasticsearch.action.ValidateActions;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -53,15 +52,25 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
     private boolean localIndicesRequested = false;
     private IndicesOptions indicesOptions;
 
+    // true if the user did not provide any index expression - they only want cluster level info, not index matching
+    private final boolean clusterInfoOnly;
+    // Whether this request is being processed on the primary ("local") cluster being queried or on a remote.
+    // This is needed when clusterInfoOnly=true since we need to know whether to list out all possible remotes
+    // on a node. (We don't want cross-cluster chaining on remotes that might be configured with their own remotes.)
+    private final boolean isQueryingCluster;
+
     public ResolveClusterActionRequest(String[] names) {
-        this(names, DEFAULT_INDICES_OPTIONS);
+        this(names, DEFAULT_INDICES_OPTIONS, false, true);
+        assert names != null && names.length > 0 : "One or more index expressions must be included with this constructor";
     }
 
     @SuppressWarnings("this-escape")
-    public ResolveClusterActionRequest(String[] names, IndicesOptions indicesOptions) {
+    public ResolveClusterActionRequest(String[] names, IndicesOptions indicesOptions, boolean clusterInfoOnly, boolean queryingCluster) {
         this.names = names;
         this.localIndicesRequested = localIndicesPresent(names);
         this.indicesOptions = indicesOptions;
+        this.clusterInfoOnly = clusterInfoOnly;
+        this.isQueryingCluster = queryingCluster;
     }
 
     @SuppressWarnings("this-escape")
@@ -73,6 +82,13 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
         this.names = in.readStringArray();
         this.indicesOptions = IndicesOptions.readIndicesOptions(in);
         this.localIndicesRequested = localIndicesPresent(names);
+        if (in.getTransportVersion().onOrAfter(TransportVersions.RESOLVE_CLUSTER_NO_INDEX_EXPRESSION)) {
+            this.clusterInfoOnly = in.readBoolean();
+            this.isQueryingCluster = in.readBoolean();
+        } else {
+            this.clusterInfoOnly = false;
+            this.isQueryingCluster = false;
+        }
     }
 
     @Override
@@ -83,9 +99,13 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
         }
         out.writeStringArray(names);
         indicesOptions.writeIndicesOptions(out);
+        if (out.getTransportVersion().onOrAfter(TransportVersions.RESOLVE_CLUSTER_NO_INDEX_EXPRESSION)) {
+            out.writeBoolean(clusterInfoOnly);
+            out.writeBoolean(isQueryingCluster);
+        }
     }
 
-    private String createVersionErrorMessage(TransportVersion versionFound) {
+    static String createVersionErrorMessage(TransportVersion versionFound) {
         return Strings.format(
             "%s %s but was %s",
             TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX,
@@ -96,11 +116,7 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
 
     @Override
     public ActionRequestValidationException validate() {
-        ActionRequestValidationException validationException = null;
-        if (names == null || names.length == 0) {
-            validationException = ValidateActions.addValidationError("no index expressions specified", validationException);
-        }
-        return validationException;
+        return null;
     }
 
     @Override
@@ -123,6 +139,14 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
         return names;
     }
 
+    public boolean clusterInfoOnly() {
+        return clusterInfoOnly;
+    }
+
+    public boolean queryingCluster() {
+        return isQueryingCluster;
+    }
+
     public boolean isLocalIndicesRequested() {
         return localIndicesRequested;
     }
@@ -160,7 +184,11 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
         return new CancellableTask(id, type, action, "", parentTaskId, headers) {
             @Override
             public String getDescription() {
-                return "resolve/cluster for " + Arrays.toString(indices());
+                if (indices().length == 0) {
+                    return "resolve/cluster";
+                } else {
+                    return "resolve/cluster for " + Arrays.toString(indices());
+                }
             }
         };
     }
@@ -173,4 +201,18 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
         }
         return false;
     }
+
+    @Override
+    public String toString() {
+        return "ResolveClusterActionRequest{"
+            + "indices="
+            + Arrays.toString(names)
+            + ", localIndicesRequested="
+            + localIndicesRequested
+            + ", clusterInfoOnly="
+            + clusterInfoOnly
+            + ", queryingCluster="
+            + isQueryingCluster
+            + '}';
+    }
 }

+ 10 - 10
server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterInfo.java

@@ -22,7 +22,7 @@ public class ResolveClusterInfo implements Writeable {
 
     private final boolean connected;
     private final Boolean skipUnavailable;  // remote clusters don't know their setting, so they put null and querying cluster fills in
-    private final Boolean matchingIndices;  // null means 'unknown' when not connected
+    private final Boolean matchingIndices;  // null means no index expression requested by user or remote cluster was not connected
     private final Build build;
     private final String error;
 
@@ -38,8 +38,14 @@ public class ResolveClusterInfo implements Writeable {
         this(connected, skipUnavailable, matchingIndices, build, null);
     }
 
-    public ResolveClusterInfo(ResolveClusterInfo copyFrom, boolean skipUnavailable) {
-        this(copyFrom.isConnected(), skipUnavailable, copyFrom.getMatchingIndices(), copyFrom.getBuild(), copyFrom.getError());
+    public ResolveClusterInfo(ResolveClusterInfo copyFrom, boolean skipUnavailable, boolean clusterInfoOnly) {
+        this(
+            copyFrom.isConnected(),
+            skipUnavailable,
+            clusterInfoOnly ? null : copyFrom.getMatchingIndices(),
+            copyFrom.getBuild(),
+            clusterInfoOnly ? null : copyFrom.getError()
+        );
     }
 
     private ResolveClusterInfo(boolean connected, Boolean skipUnavailable, Boolean matchingIndices, Build build, String error) {
@@ -48,7 +54,6 @@ public class ResolveClusterInfo implements Writeable {
         this.matchingIndices = matchingIndices;
         this.build = build;
         this.error = error;
-        assert error != null || matchingIndices != null || connected == false : "If matchingIndices is null, connected must be false";
     }
 
     public ResolveClusterInfo(StreamInput in) throws IOException {
@@ -67,12 +72,7 @@ public class ResolveClusterInfo implements Writeable {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().before(TransportVersions.V_8_13_0)) {
-            throw new UnsupportedOperationException(
-                "ResolveClusterAction requires at least version "
-                    + TransportVersions.V_8_13_0.toReleaseVersion()
-                    + " but was "
-                    + out.getTransportVersion().toReleaseVersion()
-            );
+            throw new UnsupportedOperationException(ResolveClusterActionRequest.createVersionErrorMessage(out.getTransportVersion()));
         }
         out.writeBoolean(connected);
         out.writeOptionalBoolean(skipUnavailable);

+ 113 - 26
server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java

@@ -59,6 +59,8 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
         ResolveClusterActionResponse::new
     );
 
+    private static final String DUMMY_INDEX_FOR_OLDER_CLUSTERS = "*:dummy*";
+
     private final Executor searchCoordinationExecutor;
     private final ClusterService clusterService;
     private final RemoteClusterService remoteClusterService;
@@ -94,10 +96,39 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
         assert task instanceof CancellableTask;
         final CancellableTask resolveClusterTask = (CancellableTask) task;
         ClusterState clusterState = clusterService.state();
-        Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(request.indicesOptions(), request.indices());
-        OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
 
         Map<String, ResolveClusterInfo> clusterInfoMap = new ConcurrentHashMap<>();
+        Map<String, OriginalIndices> remoteClusterIndices;
+        if (request.clusterInfoOnly()) {
+            if (request.queryingCluster()) {
+                /*
+                 * User does not want to check whether an index expression matches, so we use the "*:dummy*" index pattern to
+                 * 1) determine all the locally configured remote clusters and
+                 * 2) for older clusters that do not understand the new clusterInfoOnly setting (or for even older clusters
+                 *    where we need to fall back to using _resolve/index), we have to provide an index expression so use dummy*
+                 *    and then ignore the matching_indices value that comes back from those remotes. This is preferable to sending
+                 *    just "*" since that could be an expensive operation on clusters with thousands of indices/aliases/datastreams
+                 */
+                String[] dummyIndexExpr = new String[] { DUMMY_INDEX_FOR_OLDER_CLUSTERS };
+                remoteClusterIndices = remoteClusterService.groupIndices(IndicesOptions.DEFAULT, dummyIndexExpr, false);
+                if (remoteClusterIndices.isEmpty()) {
+                    // no remote clusters are configured on the primary "querying" cluster
+                    listener.onResponse(new ResolveClusterActionResponse(Map.of()));
+                    return;
+                }
+            } else {
+                // on remote if clusterInfoOnly is requested, don't bother with index expression matching
+                ResolveClusterInfo resolveClusterInfo = new ResolveClusterInfo(true, false, null, Build.current());
+                clusterInfoMap.put(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, resolveClusterInfo);
+                listener.onResponse(new ResolveClusterActionResponse(clusterInfoMap));
+                return;
+            }
+        } else {
+            remoteClusterIndices = remoteClusterService.groupIndices(request.indicesOptions(), request.indices(), false);
+        }
+
+        OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+
         // add local cluster info if in scope of the index-expression from user
         if (localIndices != null) {
             try {
@@ -142,7 +173,12 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
                     searchCoordinationExecutor,
                     RemoteClusterService.DisconnectedStrategy.FAIL_IF_DISCONNECTED
                 );
-                var remoteRequest = new ResolveClusterActionRequest(originalIndices.indices(), request.indicesOptions());
+                var remoteRequest = new ResolveClusterActionRequest(
+                    originalIndices.indices(),
+                    request.indicesOptions(),
+                    request.clusterInfoOnly(),
+                    false
+                );
                 // allow cancellation requests to propagate to remote clusters
                 remoteRequest.setParentTask(clusterService.localNode().getId(), task.getId());
 
@@ -155,7 +191,7 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
                         }
                         ResolveClusterInfo info = response.getResolveClusterInfo().get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
                         if (info != null) {
-                            clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(info, skipUnavailable));
+                            clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(info, skipUnavailable, request.clusterInfoOnly()));
                         }
                         if (resolveClusterTask.isCancelled()) {
                             releaseResourcesOnCancel(clusterInfoMap);
@@ -196,28 +232,13 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
                                     originalIndices.indices(),
                                     originalIndices.indicesOptions()
                                 );
-                                ActionListener<ResolveIndexAction.Response> resolveIndexActionListener = new ActionListener<>() {
-                                    @Override
-                                    public void onResponse(ResolveIndexAction.Response response) {
-                                        boolean matchingIndices = response.getIndices().size() > 0
-                                            || response.getAliases().size() > 0
-                                            || response.getDataStreams().size() > 0;
-                                        clusterInfoMap.put(
-                                            clusterAlias,
-                                            new ResolveClusterInfo(true, skipUnavailable, matchingIndices, null)
-                                        );
-                                    }
-
-                                    @Override
-                                    public void onFailure(Exception e) {
-                                        Throwable cause = ExceptionsHelper.unwrapCause(e);
-                                        clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(false, skipUnavailable, cause.toString()));
-                                        logger.warn(
-                                            () -> Strings.format("Failure from _resolve/cluster lookup against cluster %s: ", clusterAlias),
-                                            e
-                                        );
-                                    }
-                                };
+                                ActionListener<ResolveIndexAction.Response> resolveIndexActionListener = createResolveIndexActionListener(
+                                    clusterAlias,
+                                    request.clusterInfoOnly(),
+                                    skipUnavailable,
+                                    clusterInfoMap,
+                                    resolveClusterTask
+                                );
                                 remoteClusterClient.execute(
                                     ResolveIndexAction.REMOTE_TYPE,
                                     resolveIndexRequest,
@@ -238,7 +259,73 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
                             releaseResourcesOnCancel(clusterInfoMap);
                         }
                     }
+
+                    /**
+                     * Creates an ActionListener that handles responses when falling back to the resolve/index
+                     * endpoint on older clusters that don't support the resolve/cluster endpoint.
+                     */
+                    private static ActionListener<ResolveIndexAction.Response> createResolveIndexActionListener(
+                        String clusterAlias,
+                        boolean clusterInfoOnly,
+                        boolean skipUnavailable,
+                        Map<String, ResolveClusterInfo> clusterInfoMap,
+                        CancellableTask resolveClusterTask
+                    ) {
+                        return new ActionListener<>() {
+                            @Override
+                            public void onResponse(ResolveIndexAction.Response response) {
+                                if (resolveClusterTask.isCancelled()) {
+                                    releaseResourcesOnCancel(clusterInfoMap);
+                                    return;
+                                }
+
+                                Boolean matchingIndices = null;
+                                if (clusterInfoOnly == false) {
+                                    matchingIndices = response.getIndices().size() > 0
+                                        || response.getAliases().size() > 0
+                                        || response.getDataStreams().size() > 0;
+                                }
+                                clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, matchingIndices, null));
+                            }
+
+                            @Override
+                            public void onFailure(Exception e) {
+                                if (resolveClusterTask.isCancelled()) {
+                                    releaseResourcesOnCancel(clusterInfoMap);
+                                    return;
+                                }
+
+                                ResolveClusterInfo resolveClusterInfo;
+                                if (ExceptionsHelper.isRemoteUnavailableException((e))) {
+                                    resolveClusterInfo = new ResolveClusterInfo(false, skipUnavailable);
+                                } else if (ExceptionsHelper.unwrap(
+                                    e,
+                                    ElasticsearchSecurityException.class
+                                ) instanceof ElasticsearchSecurityException ese) {
+                                    /*
+                                     * some ElasticsearchSecurityExceptions come from the local cluster security interceptor after you've
+                                     * issued the client.execute call but before any call went to the remote cluster, so with an
+                                     * ElasticsearchSecurityException you can't tell whether the remote cluster is available or not, so mark
+                                     * it as connected=false
+                                     */
+                                    resolveClusterInfo = new ResolveClusterInfo(false, skipUnavailable, ese.getMessage());
+                                } else if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) instanceof IndexNotFoundException ie) {
+                                    resolveClusterInfo = new ResolveClusterInfo(true, skipUnavailable, ie.getMessage());
+                                } else {
+                                    // not clear what the error is here, so be safe and mark the cluster as not connected
+                                    String errorMessage = ExceptionsHelper.unwrapCause(e).getMessage();
+                                    resolveClusterInfo = new ResolveClusterInfo(false, skipUnavailable, errorMessage);
+                                    logger.warn(
+                                        () -> Strings.format("Failure from _resolve/index lookup against cluster %s: ", clusterAlias),
+                                        e
+                                    );
+                                }
+                                clusterInfoMap.put(clusterAlias, resolveClusterInfo);
+                            }
+                        };
+                    }
                 };
+
                 remoteClusterClient.execute(
                     TransportResolveClusterAction.REMOTE_TYPE,
                     remoteRequest,

+ 9 - 7
server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java

@@ -32,6 +32,7 @@ import org.elasticsearch.core.Booleans;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap;
+import org.elasticsearch.entitlement.runtime.policy.PolicyParserUtils;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.jdk.JarHell;
@@ -216,24 +217,25 @@ class Elasticsearch {
         );
 
         // load the plugin Java modules and layers now for use in entitlements
-        var pluginsLoader = PluginsLoader.createPluginsLoader(nodeEnv.modulesFile(), nodeEnv.pluginsFile());
+        var modulesBundles = PluginsLoader.loadModulesBundles(nodeEnv.modulesFile());
+        var pluginsBundles = PluginsLoader.loadPluginsBundles(nodeEnv.pluginsFile());
+        var pluginsLoader = PluginsLoader.createPluginsLoader(modulesBundles, pluginsBundles);
         bootstrap.setPluginsLoader(pluginsLoader);
 
         if (bootstrap.useEntitlements()) {
             LogManager.getLogger(Elasticsearch.class).info("Bootstrapping Entitlements");
 
-            List<EntitlementBootstrap.PluginData> pluginData = Stream.concat(
+            var pluginData = Stream.concat(
                 pluginsLoader.moduleBundles()
                     .stream()
-                    .map(bundle -> new EntitlementBootstrap.PluginData(bundle.getDir(), bundle.pluginDescriptor().isModular(), false)),
+                    .map(bundle -> new PolicyParserUtils.PluginData(bundle.getDir(), bundle.pluginDescriptor().isModular(), false)),
                 pluginsLoader.pluginBundles()
                     .stream()
-                    .map(bundle -> new EntitlementBootstrap.PluginData(bundle.getDir(), bundle.pluginDescriptor().isModular(), true))
+                    .map(bundle -> new PolicyParserUtils.PluginData(bundle.getDir(), bundle.pluginDescriptor().isModular(), true))
             ).toList();
-
+            var pluginPolicies = PolicyParserUtils.createPluginPolicies(pluginData);
             var pluginsResolver = PluginsResolver.create(pluginsLoader);
-
-            EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName);
+            EntitlementBootstrap.bootstrap(pluginPolicies, pluginsResolver::resolveClassToPluginName);
         } else if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
             // install SM after natives, shutdown hooks, etc.
             LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager");

+ 13 - 3
server/src/main/java/org/elasticsearch/common/settings/Setting.java

@@ -24,6 +24,7 @@ import org.elasticsearch.core.Booleans;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
+import org.elasticsearch.core.UpdateForV10;
 import org.elasticsearch.core.UpdateForV9;
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.xcontent.ToXContentObject;
@@ -151,10 +152,16 @@ public class Setting<T> implements ToXContentObject {
          * Indicates that this index-level setting was deprecated in {@link Version#V_7_17_0} and is
          * forbidden in indices created from {@link Version#V_8_0_0} onwards.
          */
-        @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // introduce IndexSettingDeprecatedInV8AndRemovedInV10
+        @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // remove this constant once indices created in V7 can no longer be read by V10
         // note we still need v7 settings in v9 because we support reading from N-2 indices now
         IndexSettingDeprecatedInV7AndRemovedInV8,
 
+        /**
+         * Indicates that this index-level setting was deprecated in {@link Version#V_8_18_0} and is
+         * forbidden in indices created from {@link Version#V_9_0_0} onwards.
+         */
+        IndexSettingDeprecatedInV8AndRemovedInV9,
+
         /**
          * Indicates that this setting is accessible by non-operator users (public) in serverless
          * Users will be allowed to set and see values of this setting.
@@ -176,7 +183,8 @@ public class Setting<T> implements ToXContentObject {
     private static final EnumSet<Property> DEPRECATED_PROPERTIES = EnumSet.of(
         Property.Deprecated,
         Property.DeprecatedWarning,
-        Property.IndexSettingDeprecatedInV7AndRemovedInV8
+        Property.IndexSettingDeprecatedInV7AndRemovedInV8,
+        Property.IndexSettingDeprecatedInV8AndRemovedInV9
     );
 
     @SuppressWarnings("this-escape")
@@ -216,6 +224,7 @@ public class Setting<T> implements ToXContentObject {
             checkPropertyRequiresIndexScope(propertiesAsSet, Property.InternalIndex);
             checkPropertyRequiresIndexScope(propertiesAsSet, Property.PrivateIndex);
             checkPropertyRequiresIndexScope(propertiesAsSet, Property.IndexSettingDeprecatedInV7AndRemovedInV8);
+            checkPropertyRequiresIndexScope(propertiesAsSet, Property.IndexSettingDeprecatedInV8AndRemovedInV9);
             checkPropertyRequiresNodeScope(propertiesAsSet);
             this.properties = propertiesAsSet;
         }
@@ -450,7 +459,8 @@ public class Setting<T> implements ToXContentObject {
     private boolean isDeprecated() {
         return properties.contains(Property.Deprecated)
             || properties.contains(Property.DeprecatedWarning)
-            || properties.contains(Property.IndexSettingDeprecatedInV7AndRemovedInV8);
+            || properties.contains(Property.IndexSettingDeprecatedInV7AndRemovedInV8)
+            || properties.contains(Property.IndexSettingDeprecatedInV8AndRemovedInV9);
     }
 
     private boolean isDeprecatedWarningOnly() {

+ 0 - 9
server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java

@@ -19,15 +19,6 @@ import java.util.Set;
  */
 public class MapperFeatures implements FeatureSpecification {
 
-    // Used to avoid noise in mixed cluster and rest compatibility tests. Must not be backported to 8.x branch.
-    // This label gets added to tests with such failures before merging with main, then removed when backported to 8.x.
-    public static final NodeFeature BWC_WORKAROUND_9_0 = new NodeFeature("mapper.bwc_workaround_9_0", true);
-
-    @Override
-    public Set<NodeFeature> getFeatures() {
-        return Set.of(BWC_WORKAROUND_9_0);
-    }
-
     public static final NodeFeature CONSTANT_KEYWORD_SYNTHETIC_SOURCE_WRITE_FIX = new NodeFeature(
         "mapper.constant_keyword.synthetic_source_write_fix"
     );

+ 42 - 28
server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java

@@ -126,47 +126,31 @@ public class PluginsLoader {
     private final Set<PluginBundle> pluginBundles;
 
     /**
-     * Constructs a new PluginsLoader
+     * Loads a set of PluginBundles from the modules directory
      *
      * @param modulesDirectory The directory modules exist in, or null if modules should not be loaded from the filesystem
-     * @param pluginsDirectory The directory plugins exist in, or null if plugins should not be loaded from the filesystem
      */
-    public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path pluginsDirectory) {
-        return createPluginsLoader(modulesDirectory, pluginsDirectory, true);
-    }
-
-    /**
-     * Constructs a new PluginsLoader
-     *
-     * @param modulesDirectory The directory modules exist in, or null if modules should not be loaded from the filesystem
-     * @param pluginsDirectory The directory plugins exist in, or null if plugins should not be loaded from the filesystem
-     * @param withServerExports {@code true} to add server module exports
-     */
-    public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path pluginsDirectory, boolean withServerExports) {
-        Map<String, List<ModuleQualifiedExportsService>> qualifiedExports;
-        if (withServerExports) {
-            qualifiedExports = new HashMap<>(ModuleQualifiedExportsService.getBootServices());
-            addServerExportsService(qualifiedExports);
-        } else {
-            qualifiedExports = Collections.emptyMap();
-        }
-
-        Set<PluginBundle> seenBundles = new LinkedHashSet<>();
-
+    public static Set<PluginBundle> loadModulesBundles(Path modulesDirectory) {
         // load (elasticsearch) module layers
         final Set<PluginBundle> modules;
         if (modulesDirectory != null) {
             try {
                 modules = PluginsUtils.getModuleBundles(modulesDirectory);
-                seenBundles.addAll(modules);
             } catch (IOException ex) {
                 throw new IllegalStateException("Unable to initialize modules", ex);
             }
         } else {
             modules = Collections.emptySet();
         }
+        return modules;
+    }
 
-        // load plugin layers
+    /**
+     * Loads a set of PluginBundles from the plugins directory
+     *
+     * @param pluginsDirectory The directory plugins exist in, or null if plugins should not be loaded from the filesystem
+     */
+    public static Set<PluginBundle> loadPluginsBundles(Path pluginsDirectory) {
         final Set<PluginBundle> plugins;
         if (pluginsDirectory != null) {
             try {
@@ -174,8 +158,6 @@ public class PluginsLoader {
                 if (isAccessibleDirectory(pluginsDirectory, logger)) {
                     PluginsUtils.checkForFailedPluginRemovals(pluginsDirectory);
                     plugins = PluginsUtils.getPluginBundles(pluginsDirectory);
-
-                    seenBundles.addAll(plugins);
                 } else {
                     plugins = Collections.emptySet();
                 }
@@ -185,6 +167,38 @@ public class PluginsLoader {
         } else {
             plugins = Collections.emptySet();
         }
+        return plugins;
+    }
+
+    /**
+     * Constructs a new PluginsLoader
+     *
+     * @param modules           The set of module bundles present on the filesystem
+     * @param plugins           The set of plugin bundles present on the filesystem
+     */
+    public static PluginsLoader createPluginsLoader(Set<PluginBundle> modules, Set<PluginBundle> plugins) {
+        return createPluginsLoader(modules, plugins, true);
+    }
+
+    /**
+     * Constructs a new PluginsLoader
+     *
+     * @param modules           The set of module bundles present on the filesystem
+     * @param plugins           The set of plugin bundles present on the filesystem
+     * @param withServerExports {@code true} to add server module exports
+     */
+    public static PluginsLoader createPluginsLoader(Set<PluginBundle> modules, Set<PluginBundle> plugins, boolean withServerExports) {
+        Map<String, List<ModuleQualifiedExportsService>> qualifiedExports;
+        if (withServerExports) {
+            qualifiedExports = new HashMap<>(ModuleQualifiedExportsService.getBootServices());
+            addServerExportsService(qualifiedExports);
+        } else {
+            qualifiedExports = Collections.emptyMap();
+        }
+
+        Set<PluginBundle> seenBundles = new LinkedHashSet<>();
+        seenBundles.addAll(modules);
+        seenBundles.addAll(plugins);
 
         Map<String, LoadedPluginLayer> loadedPluginLayers = new LinkedHashMap<>();
         Map<String, Set<URL>> transitiveUrls = new HashMap<>();

+ 34 - 3
server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResolveClusterAction.java

@@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.resolve.TransportResolveClusterAct
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
@@ -22,11 +23,19 @@ import org.elasticsearch.rest.action.RestToXContentListener;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 
 public class RestResolveClusterAction extends BaseRestHandler {
 
+    private static Set<String> INDEX_OPTIONS_PARAMS = Set.of(
+        "expand_wildcards",
+        "ignore_unavailable",
+        "allow_no_indices",
+        "ignore_throttled"
+    );
+
     @Override
     public String getName() {
         return "resolve_cluster_action";
@@ -34,18 +43,40 @@ public class RestResolveClusterAction extends BaseRestHandler {
 
     @Override
     public List<Route> routes() {
-        return List.of(new Route(GET, "/_resolve/cluster/{name}"));
+        return List.of(new Route(GET, "/_resolve/cluster"), new Route(GET, "/_resolve/cluster/{name}"));
     }
 
     @Override
     protected BaseRestHandler.RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
-        String[] indexExpressions = Strings.splitStringByCommaToArray(request.param("name"));
+        String[] indexExpressions;
+        boolean clusterInfoOnly;
+        if (request.hasParam("name")) {
+            indexExpressions = Strings.splitStringByCommaToArray(request.param("name"));
+            clusterInfoOnly = false;
+        } else {
+            indexExpressions = new String[0];
+            clusterInfoOnly = true;
+            Set<String> indexOptions = requestIndexOptionsParams(request);
+            if (indexOptions.isEmpty() == false) {
+                // this restriction avoids problems with having to send wildcarded index expressions to older clusters
+                // when no index expression is provided by the user
+                throw new IllegalArgumentException(
+                    "No index options are allowed on _resolve/cluster when no index expression is specified, but received: " + indexOptions
+                );
+            }
+        }
         ResolveClusterActionRequest resolveRequest = new ResolveClusterActionRequest(
             indexExpressions,
-            IndicesOptions.fromRequest(request, ResolveIndexAction.Request.DEFAULT_INDICES_OPTIONS)
+            IndicesOptions.fromRequest(request, ResolveIndexAction.Request.DEFAULT_INDICES_OPTIONS),
+            clusterInfoOnly,
+            true
         );
         return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
             .indices()
             .execute(TransportResolveClusterAction.TYPE, resolveRequest, new RestToXContentListener<>(channel));
     }
+
+    private static Set<String> requestIndexOptionsParams(RestRequest request) {
+        return Sets.intersection(request.params().keySet(), INDEX_OPTIONS_PARAMS);
+    }
 }

+ 26 - 3
server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java

@@ -183,12 +183,24 @@ public final class RemoteClusterService extends RemoteClusterAware
         return remoteClusters.get(remoteCluster).isNodeConnected(node);
     }
 
-    public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indices) {
+    /**
+     * Group indices by cluster alias mapped to OriginalIndices for that cluster.
+     * @param indicesOptions IndicesOptions to clarify how the index expressions should be parsed/applied
+     * @param indices Multiple index expressions as string[].
+     * @param returnLocalAll whether to support the _all functionality needed by _search
+     *        (See https://github.com/elastic/elasticsearch/pull/33899). If true, and no indices are specified,
+     *        then a Map with one entry for the local cluster with an empty index array is returned.
+     *        If false, an empty map is returned when no indices are specified.
+     * @return Map keyed by cluster alias having OriginalIndices as the map value parsed from the String[] indices argument
+     */
+    public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indices, boolean returnLocalAll) {
         final Map<String, OriginalIndices> originalIndicesMap = new HashMap<>();
         final Map<String, List<String>> groupedIndices = groupClusterIndices(getRemoteClusterNames(), indices);
         if (groupedIndices.isEmpty()) {
-            // search on _all in the local cluster if neither local indices nor remote indices were specified
-            originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions));
+            if (returnLocalAll) {
+                // search on _all in the local cluster if neither local indices nor remote indices were specified
+                originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions));
+            }
         } else {
             for (Map.Entry<String, List<String>> entry : groupedIndices.entrySet()) {
                 String clusterAlias = entry.getKey();
@@ -199,6 +211,17 @@ public final class RemoteClusterService extends RemoteClusterAware
         return originalIndicesMap;
     }
 
+    /**
+     * If no indices are specified, then a Map with one entry for the local cluster with an empty index array is returned.
+     * For details see {@code groupIndices(IndicesOptions indicesOptions, String[] indices, boolean returnLocalAll)}
+     * @param indicesOptions IndicesOptions to clarify how the index expressions should be parsed/applied
+     * @param indices Multiple index expressions as string[].
+     * @return Map keyed by cluster alias having OriginalIndices as the map value parsed from the String[] indices argument
+     */
+    public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indices) {
+        return groupIndices(indicesOptions, indices, true);
+    }
+
     /**
      * Returns <code>true</code> iff the given cluster is configured as a remote cluster. Otherwise <code>false</code>
      */

+ 17 - 20
server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequestTests.java

@@ -9,7 +9,6 @@
 
 package org.elasticsearch.action.admin.indices.resolve;
 
-import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.util.ArrayUtils;
@@ -31,19 +30,23 @@ public class ResolveClusterActionRequestTests extends AbstractWireSerializingTes
 
     @Override
     protected ResolveClusterActionRequest createTestInstance() {
-        String[] names = generateRandomStringArray(1, 7, false);
-        IndicesOptions indicesOptions = IndicesOptions.fromOptions(
-            randomBoolean(),
-            randomBoolean(),
-            randomBoolean(),
-            randomBoolean(),
-            randomBoolean(),
-            randomBoolean(),
-            randomBoolean(),
-            randomBoolean(),
-            randomBoolean()
-        );
-        return new ResolveClusterActionRequest(names, indicesOptions);
+        if (randomInt(5) == 3) {
+            return new ResolveClusterActionRequest(new String[0], IndicesOptions.DEFAULT, true, randomBoolean());
+        } else {
+            String[] names = generateRandomStringArray(1, 7, false);
+            IndicesOptions indicesOptions = IndicesOptions.fromOptions(
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean()
+            );
+            return new ResolveClusterActionRequest(names, indicesOptions, false, randomBoolean());
+        }
     }
 
     @Override
@@ -71,12 +74,6 @@ public class ResolveClusterActionRequestTests extends AbstractWireSerializingTes
         return mutatedInstance;
     }
 
-    public void testValidation() {
-        ResolveClusterActionRequest request = new ResolveClusterActionRequest(new String[0]);
-        ActionRequestValidationException exception = request.validate();
-        assertNotNull(exception);
-    }
-
     public void testLocalIndicesPresent() {
         {
             String[] indicesOrig = new String[] { "*" };

+ 2 - 1
server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionResponseTests.java

@@ -54,11 +54,12 @@ public class ResolveClusterActionResponseTests extends AbstractWireSerializingTe
     }
 
     static ResolveClusterInfo randomResolveClusterInfo() {
-        int val = randomIntBetween(1, 3);
+        int val = randomIntBetween(1, 4);
         return switch (val) {
             case 1 -> new ResolveClusterInfo(false, randomBoolean());
             case 2 -> new ResolveClusterInfo(randomBoolean(), randomBoolean(), randomAlphaOfLength(15));
             case 3 -> new ResolveClusterInfo(randomBoolean(), randomBoolean(), randomBoolean(), Build.current());
+            case 4 -> new ResolveClusterInfo(true, randomBoolean(), null, Build.current());
             default -> throw new UnsupportedOperationException("should not get here");
         };
     }

+ 2 - 6
server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java

@@ -66,12 +66,8 @@ public class TransportResolveClusterActionTests extends ESTestCase {
             ResolveClusterActionRequest request = new ResolveClusterActionRequest(new String[] { "test" }) {
                 @Override
                 public void writeTo(StreamOutput out) throws IOException {
-                    throw new UnsupportedOperationException(
-                        "ResolveClusterAction requires at least version "
-                            + TransportVersions.V_8_13_0.toReleaseVersion()
-                            + " but was "
-                            + out.getTransportVersion().toReleaseVersion()
-                    );
+                    String versionErrorMessage = ResolveClusterActionRequest.createVersionErrorMessage(out.getTransportVersion());
+                    throw new UnsupportedOperationException(versionErrorMessage);
                 }
             };
             ClusterService clusterService = new ClusterService(

+ 8 - 0
server/src/test/java/org/elasticsearch/common/settings/SettingTests.java

@@ -1521,6 +1521,14 @@ public class SettingTests extends ESTestCase {
             IllegalArgumentException.class,
             () -> Setting.boolSetting("a.bool.setting", true, Property.DeprecatedWarning, Property.IndexSettingDeprecatedInV7AndRemovedInV8)
         );
+        expectThrows(
+            IllegalArgumentException.class,
+            () -> Setting.boolSetting("a.bool.setting", true, Property.Deprecated, Property.IndexSettingDeprecatedInV8AndRemovedInV9)
+        );
+        expectThrows(
+            IllegalArgumentException.class,
+            () -> Setting.boolSetting("a.bool.setting", true, Property.DeprecatedWarning, Property.IndexSettingDeprecatedInV8AndRemovedInV9)
+        );
     }
 
     public void testIntSettingBounds() {

+ 6 - 1
server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java

@@ -28,6 +28,7 @@ import java.net.URLClassLoader;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Map;
+import java.util.Set;
 
 import static java.util.Map.entry;
 import static org.elasticsearch.test.LambdaMatchers.transformedMatch;
@@ -45,7 +46,11 @@ public class PluginsLoaderTests extends ESTestCase {
     private static final Logger logger = LogManager.getLogger(PluginsLoaderTests.class);
 
     static PluginsLoader newPluginsLoader(Settings settings) {
-        return PluginsLoader.createPluginsLoader(null, TestEnvironment.newEnvironment(settings).pluginsFile(), false);
+        return PluginsLoader.createPluginsLoader(
+            Set.of(),
+            PluginsLoader.loadPluginsBundles(TestEnvironment.newEnvironment(settings).pluginsFile()),
+            false
+        );
     }
 
     public void testToModuleName() {

+ 5 - 1
server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java

@@ -68,7 +68,11 @@ public class PluginsServiceTests extends ESTestCase {
         return new PluginsService(
             settings,
             null,
-            PluginsLoader.createPluginsLoader(null, TestEnvironment.newEnvironment(settings).pluginsFile(), false)
+            PluginsLoader.createPluginsLoader(
+                Set.of(),
+                PluginsLoader.loadPluginsBundles(TestEnvironment.newEnvironment(settings).pluginsFile()),
+                false
+            )
         );
     }
 

+ 14 - 8
test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java

@@ -631,40 +631,46 @@ public class HeapAttackIT extends ESRestTestCase {
     public void testLookupExplosion() throws IOException {
         int sensorDataCount = 7500;
         int lookupEntries = 10000;
-        Map<?, ?> map = responseAsMap(lookupExplosion(sensorDataCount, lookupEntries));
+        Map<?, ?> map = lookupExplosion(sensorDataCount, lookupEntries);
         assertMap(map, matchesMap().extraOk().entry("values", List.of(List.of(sensorDataCount * lookupEntries))));
     }
 
     public void testLookupExplosionManyMatches() throws IOException {
-        assertCircuitBreaks(() -> lookupExplosion(8500, 10000));
+        assertCircuitBreaks(() -> {
+            Map<?, ?> result = lookupExplosion(8500, 10000);
+            logger.error("should have failed but got {}", result);
+        });
     }
 
-    private Response lookupExplosion(int sensorDataCount, int lookupEntries) throws IOException {
+    private Map<?, ?> lookupExplosion(int sensorDataCount, int lookupEntries) throws IOException {
         initSensorData(sensorDataCount, 1);
         initSensorLookup(lookupEntries, 1, i -> "73.9857 40.7484");
         StringBuilder query = startQuery();
         query.append("FROM sensor_data | LOOKUP JOIN sensor_lookup ON id | STATS COUNT(*)\"}");
-        return query(query.toString(), null);
+        return responseAsMap(query(query.toString(), null));
     }
 
     public void testEnrichExplosion() throws IOException {
         int sensorDataCount = 1000;
         int lookupEntries = 100;
-        Map<?, ?> map = responseAsMap(enrichExplosion(sensorDataCount, lookupEntries));
+        Map<?, ?> map = enrichExplosion(sensorDataCount, lookupEntries);
         assertMap(map, matchesMap().extraOk().entry("values", List.of(List.of(sensorDataCount))));
     }
 
     public void testEnrichExplosionManyMatches() throws IOException {
-        assertCircuitBreaks(() -> enrichExplosion(1000, 10000));
+        assertCircuitBreaks(() -> {
+            Map<?, ?> result = enrichExplosion(3000, 10000);
+            logger.error("should have failed but got {}", result);
+        });
     }
 
-    private Response enrichExplosion(int sensorDataCount, int lookupEntries) throws IOException {
+    private Map<?, ?> enrichExplosion(int sensorDataCount, int lookupEntries) throws IOException {
         initSensorData(sensorDataCount, 1);
         initSensorEnrich(lookupEntries, 1, i -> "73.9857 40.7484");
         try {
             StringBuilder query = startQuery();
             query.append("FROM sensor_data | ENRICH sensor ON id | STATS COUNT(*)\"}");
-            return query(query.toString(), null);
+            return responseAsMap(query(query.toString(), null));
         } finally {
             Request delete = new Request("DELETE", "/_enrich/policy/sensor");
             assertMap(responseAsMap(client().performRequest(delete)), matchesMap().entry("acknowledged", true));

+ 2 - 2
test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java

@@ -196,7 +196,7 @@ public abstract class MapperServiceTestCase extends FieldTypeTestCase {
         return createMapperService(getVersion(), getIndexSettings(), idFieldEnabled, mappings);
     }
 
-    protected final MapperService createMapperService(String mappings) throws IOException {
+    public final MapperService createMapperService(String mappings) throws IOException {
         MapperService mapperService = createMapperService(mapping(b -> {}));
         merge(mapperService, mappings);
         return mapperService;
@@ -744,7 +744,7 @@ public abstract class MapperServiceTestCase extends FieldTypeTestCase {
         return createSearchExecutionContext(mapperService, null, Settings.EMPTY);
     }
 
-    protected SearchExecutionContext createSearchExecutionContext(MapperService mapperService, IndexSearcher searcher) {
+    public final SearchExecutionContext createSearchExecutionContext(MapperService mapperService, IndexSearcher searcher) {
         return createSearchExecutionContext(mapperService, searcher, Settings.EMPTY);
     }
 

+ 5 - 5
test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/MappingGenerator.java

@@ -101,11 +101,6 @@ public class MappingGenerator {
             }
 
             if (templateEntry instanceof Template.Leaf leaf) {
-                // For simplicity we only copy to keyword fields, synthetic source logic to handle copy_to is generic.
-                if (leaf.type() == FieldType.KEYWORD) {
-                    context.addCopyToCandidate(fieldName);
-                }
-
                 var mappingParametersGenerator = specification.dataSource()
                     .get(
                         new DataSourceRequest.LeafMappingParametersGenerator(
@@ -120,6 +115,11 @@ public class MappingGenerator {
                 mappingParameters.put("type", leaf.type().toString());
                 mappingParameters.putAll(mappingParametersGenerator.get());
 
+                // For simplicity we only copy to keyword fields, synthetic source logic to handle copy_to is generic.
+                if (leaf.type() == FieldType.KEYWORD) {
+                    context.addCopyToCandidate(fieldName);
+                }
+
             } else if (templateEntry instanceof Template.Object object) {
                 var mappingParametersGenerator = specification.dataSource()
                     .get(new DataSourceRequest.ObjectMappingParametersGenerator(false, object.nested(), context.parentSubobjects()))

+ 5 - 0
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/LocalCircuitBreaker.java

@@ -134,4 +134,9 @@ public final class LocalCircuitBreaker implements CircuitBreaker, Releasable {
             breaker.addWithoutBreaking(-reservedBytes);
         }
     }
+
+    @Override
+    public String toString() {
+        return "LocalCircuitBreaker[" + reservedBytes + "/" + overReservedBytes + ":" + maxOverReservedBytes + "]";
+    }
 }

+ 40 - 9
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/QueryList.java

@@ -39,9 +39,11 @@ import java.util.function.IntFunction;
  */
 public abstract class QueryList {
     protected final Block block;
+    protected final boolean onlySingleValues;
 
-    protected QueryList(Block block) {
+    protected QueryList(Block block, boolean onlySingleValues) {
         this.block = block;
+        this.onlySingleValues = onlySingleValues;
     }
 
     /**
@@ -51,6 +53,12 @@ public abstract class QueryList {
         return block.getPositionCount();
     }
 
+    /**
+     * Returns a copy of this query list that only returns queries for single-valued positions.
+     * That is, it returns `null` queries for either multivalued or null positions.
+     */
+    public abstract QueryList onlySingleValues();
+
     /**
      * Returns the query at the given position.
      */
@@ -93,7 +101,7 @@ public abstract class QueryList {
             case COMPOSITE -> throw new IllegalArgumentException("can't read values from [composite] block");
             case UNKNOWN -> throw new IllegalArgumentException("can't read values from [" + block + "]");
         };
-        return new TermQueryList(field, searchExecutionContext, block, blockToJavaObject);
+        return new TermQueryList(field, searchExecutionContext, block, false, blockToJavaObject);
     }
 
     /**
@@ -103,7 +111,7 @@ public abstract class QueryList {
     public static QueryList ipTermQueryList(MappedFieldType field, SearchExecutionContext searchExecutionContext, BytesRefBlock block) {
         BytesRef scratch = new BytesRef();
         byte[] ipBytes = new byte[InetAddressPoint.BYTES];
-        return new TermQueryList(field, searchExecutionContext, block, offset -> {
+        return new TermQueryList(field, searchExecutionContext, block, false, offset -> {
             final var bytes = block.getBytesRef(offset, scratch);
             if (ipBytes.length != bytes.length) {
                 // Lucene only support 16-byte IP addresses, even IPv4 is encoded in 16 bytes
@@ -123,6 +131,7 @@ public abstract class QueryList {
             field,
             searchExecutionContext,
             block,
+            false,
             field instanceof RangeFieldMapper.RangeFieldType rangeFieldType
                 ? offset -> rangeFieldType.dateTimeFormatter().formatMillis(block.getLong(offset))
                 : block::getLong
@@ -133,7 +142,7 @@ public abstract class QueryList {
      * Returns a list of geo_shape queries for the given field and the input block.
      */
     public static QueryList geoShapeQueryList(MappedFieldType field, SearchExecutionContext searchExecutionContext, Block block) {
-        return new GeoShapeQueryList(field, searchExecutionContext, block);
+        return new GeoShapeQueryList(field, searchExecutionContext, block, false);
     }
 
     private static class TermQueryList extends QueryList {
@@ -145,18 +154,27 @@ public abstract class QueryList {
             MappedFieldType field,
             SearchExecutionContext searchExecutionContext,
             Block block,
+            boolean onlySingleValues,
             IntFunction<Object> blockValueReader
         ) {
-            super(block);
+            super(block, onlySingleValues);
             this.field = field;
             this.searchExecutionContext = searchExecutionContext;
             this.blockValueReader = blockValueReader;
         }
 
+        @Override
+        public TermQueryList onlySingleValues() {
+            return new TermQueryList(field, searchExecutionContext, block, true, blockValueReader);
+        }
+
         @Override
         Query getQuery(int position) {
-            final int first = block.getFirstValueIndex(position);
             final int count = block.getValueCount(position);
+            if (onlySingleValues && count != 1) {
+                return null;
+            }
+            final int first = block.getFirstValueIndex(position);
             return switch (count) {
                 case 0 -> null;
                 case 1 -> field.termQuery(blockValueReader.apply(first), searchExecutionContext);
@@ -179,8 +197,13 @@ public abstract class QueryList {
         private final IntFunction<Geometry> blockValueReader;
         private final IntFunction<Query> shapeQuery;
 
-        private GeoShapeQueryList(MappedFieldType field, SearchExecutionContext searchExecutionContext, Block block) {
-            super(block);
+        private GeoShapeQueryList(
+            MappedFieldType field,
+            SearchExecutionContext searchExecutionContext,
+            Block block,
+            boolean onlySingleValues
+        ) {
+            super(block, onlySingleValues);
 
             this.field = field;
             this.searchExecutionContext = searchExecutionContext;
@@ -188,10 +211,18 @@ public abstract class QueryList {
             this.shapeQuery = shapeQuery();
         }
 
+        @Override
+        public GeoShapeQueryList onlySingleValues() {
+            return new GeoShapeQueryList(field, searchExecutionContext, block, true);
+        }
+
         @Override
         Query getQuery(int position) {
-            final int first = block.getFirstValueIndex(position);
             final int count = block.getValueCount(position);
+            if (onlySingleValues && count != 1) {
+                return null;
+            }
+            final int first = block.getFirstValueIndex(position);
             return switch (count) {
                 case 0 -> null;
                 case 1 -> shapeQuery.apply(first);

+ 197 - 125
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperatorTests.java

@@ -42,11 +42,14 @@ import org.elasticsearch.test.ESTestCase;
 import org.junit.After;
 import org.junit.Before;
 
+import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -70,149 +73,218 @@ public class EnrichQuerySourceOperatorTests extends ESTestCase {
     }
 
     public void testQueries() throws Exception {
-        MockDirectoryWrapper dir = newMockDirectory();
-        IndexWriterConfig iwc = new IndexWriterConfig();
-        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
-        IndexWriter writer = new IndexWriter(dir, iwc);
-        List<List<String>> terms = List.of(
-            List.of("a2"),
-            List.of("a1", "c1", "b2"),
-            List.of("a2"),
-            List.of("a3"),
-            List.of("b2", "b1", "a1")
-        );
-        for (List<String> ts : terms) {
-            Document doc = new Document();
-            for (String t : ts) {
-                doc.add(new StringField("uid", t, Field.Store.NO));
+        try (
+            var directoryData = makeDirectoryWith(
+                List.of(List.of("a2"), List.of("a1", "c1", "b2"), List.of("a2"), List.of("a3"), List.of("b2", "b1", "a1"))
+            )
+        ) {
+            final BytesRefBlock inputTerms;
+            try (BytesRefBlock.Builder termBuilder = blockFactory.newBytesRefBlockBuilder(6)) {
+                termBuilder.appendBytesRef(new BytesRef("b2"))
+                    .beginPositionEntry()
+                    .appendBytesRef(new BytesRef("c1"))
+                    .appendBytesRef(new BytesRef("a2"))
+                    .endPositionEntry()
+                    .appendBytesRef(new BytesRef("z2"))
+                    .appendNull()
+                    .appendBytesRef(new BytesRef("a3"))
+                    .appendNull();
+                inputTerms = termBuilder.build();
             }
-            writer.addDocument(doc);
-        }
-        writer.commit();
-        DirectoryReader reader = DirectoryReader.open(writer);
-        writer.close();
-
-        final BytesRefBlock inputTerms;
-        try (BytesRefBlock.Builder termBuilder = blockFactory.newBytesRefBlockBuilder(6)) {
-            termBuilder.appendBytesRef(new BytesRef("b2"))
-                .beginPositionEntry()
-                .appendBytesRef(new BytesRef("c1"))
-                .appendBytesRef(new BytesRef("a2"))
-                .endPositionEntry()
-                .appendBytesRef(new BytesRef("z2"))
-                .appendNull()
-                .appendBytesRef(new BytesRef("a3"))
-                .appendNull();
-            inputTerms = termBuilder.build();
+            MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid");
+            QueryList queryList = QueryList.rawTermQueryList(uidField, mock(SearchExecutionContext.class), inputTerms);
+            assertThat(queryList.getPositionCount(), equalTo(6));
+            assertThat(queryList.getQuery(0), equalTo(new TermQuery(new Term("uid", new BytesRef("b2")))));
+            assertThat(queryList.getQuery(1), equalTo(new TermInSetQuery("uid", List.of(new BytesRef("c1"), new BytesRef("a2")))));
+            assertThat(queryList.getQuery(2), equalTo(new TermQuery(new Term("uid", new BytesRef("z2")))));
+            assertNull(queryList.getQuery(3));
+            assertThat(queryList.getQuery(4), equalTo(new TermQuery(new Term("uid", new BytesRef("a3")))));
+            assertNull(queryList.getQuery(5));
+            // pos -> terms -> docs
+            // -----------------------------
+            // 0 -> [b2] -> [1, 4]
+            // 1 -> [c1, a2] -> [1, 0, 2]
+            // 2 -> [z2] -> []
+            // 3 -> [] -> []
+            // 4 -> [a3] -> [3]
+            // 5 -> [] -> []
+            var warnings = Warnings.createWarnings(DriverContext.WarningsMode.IGNORE, 0, 0, "test enrich");
+            EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(
+                blockFactory,
+                128,
+                queryList,
+                directoryData.reader,
+                warnings
+            );
+            Page page = queryOperator.getOutput();
+            assertNotNull(page);
+            assertThat(page.getPositionCount(), equalTo(6));
+            IntVector docs = getDocVector(page, 0);
+            assertThat(docs.getInt(0), equalTo(1));
+            assertThat(docs.getInt(1), equalTo(4));
+            assertThat(docs.getInt(2), equalTo(0));
+            assertThat(docs.getInt(3), equalTo(1));
+            assertThat(docs.getInt(4), equalTo(2));
+            assertThat(docs.getInt(5), equalTo(3));
+
+            Block positions = page.getBlock(1);
+            assertThat(BlockUtils.toJavaObject(positions, 0), equalTo(0));
+            assertThat(BlockUtils.toJavaObject(positions, 1), equalTo(0));
+            assertThat(BlockUtils.toJavaObject(positions, 2), equalTo(1));
+            assertThat(BlockUtils.toJavaObject(positions, 3), equalTo(1));
+            assertThat(BlockUtils.toJavaObject(positions, 4), equalTo(1));
+            assertThat(BlockUtils.toJavaObject(positions, 5), equalTo(4));
+            page.releaseBlocks();
+            assertTrue(queryOperator.isFinished());
+            IOUtils.close(inputTerms);
         }
-        MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid");
-        QueryList queryList = QueryList.rawTermQueryList(uidField, mock(SearchExecutionContext.class), inputTerms);
-        assertThat(queryList.getPositionCount(), equalTo(6));
-        assertThat(queryList.getQuery(0), equalTo(new TermQuery(new Term("uid", new BytesRef("b2")))));
-        assertThat(queryList.getQuery(1), equalTo(new TermInSetQuery("uid", List.of(new BytesRef("c1"), new BytesRef("a2")))));
-        assertThat(queryList.getQuery(2), equalTo(new TermQuery(new Term("uid", new BytesRef("z2")))));
-        assertNull(queryList.getQuery(3));
-        assertThat(queryList.getQuery(4), equalTo(new TermQuery(new Term("uid", new BytesRef("a3")))));
-        assertNull(queryList.getQuery(5));
-        // pos -> terms -> docs
-        // -----------------------------
-        // 0 -> [b2] -> [1, 4]
-        // 1 -> [c1, a2] -> [1, 0, 2]
-        // 2 -> [z2] -> []
-        // 3 -> [] -> []
-        // 4 -> [a1] -> [3]
-        // 5 -> [] -> []
-        var warnings = Warnings.createWarnings(DriverContext.WarningsMode.IGNORE, 0, 0, "test enrich");
-        EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, 128, queryList, reader, warnings);
-        Page p0 = queryOperator.getOutput();
-        assertNotNull(p0);
-        assertThat(p0.getPositionCount(), equalTo(6));
-        IntVector docs = getDocVector(p0, 0);
-        assertThat(docs.getInt(0), equalTo(1));
-        assertThat(docs.getInt(1), equalTo(4));
-        assertThat(docs.getInt(2), equalTo(0));
-        assertThat(docs.getInt(3), equalTo(1));
-        assertThat(docs.getInt(4), equalTo(2));
-        assertThat(docs.getInt(5), equalTo(3));
-
-        Block positions = p0.getBlock(1);
-        assertThat(BlockUtils.toJavaObject(positions, 0), equalTo(0));
-        assertThat(BlockUtils.toJavaObject(positions, 1), equalTo(0));
-        assertThat(BlockUtils.toJavaObject(positions, 2), equalTo(1));
-        assertThat(BlockUtils.toJavaObject(positions, 3), equalTo(1));
-        assertThat(BlockUtils.toJavaObject(positions, 4), equalTo(1));
-        assertThat(BlockUtils.toJavaObject(positions, 5), equalTo(4));
-        p0.releaseBlocks();
-        assertTrue(queryOperator.isFinished());
-        IOUtils.close(reader, dir, inputTerms);
     }
 
     public void testRandomMatchQueries() throws Exception {
-        MockDirectoryWrapper dir = newMockDirectory();
-        IndexWriterConfig iwc = new IndexWriterConfig();
-        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
-        IndexWriter writer = new IndexWriter(dir, iwc);
         int numTerms = randomIntBetween(10, 1000);
-        Map<String, Integer> terms = new HashMap<>();
-        for (int i = 0; i < numTerms; i++) {
-            Document doc = new Document();
-            String term = "term-" + i;
-            terms.put(term, i);
-            doc.add(new StringField("uid", term, Field.Store.NO));
-            writer.addDocument(doc);
-        }
-        writer.forceMerge(1);
-        writer.commit();
-        DirectoryReader reader = DirectoryReader.open(writer);
-        writer.close();
-
-        Map<Integer, Set<Integer>> expectedPositions = new HashMap<>();
-        int numPositions = randomIntBetween(1, 1000);
-        final BytesRefBlock inputTerms;
-        try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(numPositions)) {
-            for (int i = 0; i < numPositions; i++) {
-                if (randomBoolean()) {
-                    String term = randomFrom(terms.keySet());
-                    builder.appendBytesRef(new BytesRef(term));
-                    Integer position = terms.get(term);
-                    expectedPositions.put(i, Set.of(position));
-                } else {
+        List<List<String>> termsList = IntStream.range(0, numTerms).mapToObj(i -> List.of("term-" + i)).toList();
+        Map<String, Integer> terms = IntStream.range(0, numTerms).boxed().collect(Collectors.toMap(i -> "term-" + i, i -> i));
+
+        try (var directoryData = makeDirectoryWith(termsList)) {
+            Map<Integer, Set<Integer>> expectedPositions = new HashMap<>();
+            int numPositions = randomIntBetween(1, 1000);
+            final BytesRefBlock inputTerms;
+            try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(numPositions)) {
+                for (int i = 0; i < numPositions; i++) {
                     if (randomBoolean()) {
-                        builder.appendNull();
-                    } else {
-                        String term = "other-" + randomIntBetween(1, 100);
+                        String term = randomFrom(terms.keySet());
                         builder.appendBytesRef(new BytesRef(term));
+                        Integer position = terms.get(term);
+                        expectedPositions.put(i, Set.of(position));
+                    } else {
+                        if (randomBoolean()) {
+                            builder.appendNull();
+                        } else {
+                            String term = "other-" + randomIntBetween(1, 100);
+                            builder.appendBytesRef(new BytesRef(term));
+                        }
                     }
                 }
+                inputTerms = builder.build();
             }
-            inputTerms = builder.build();
-        }
-        MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid");
-        var queryList = QueryList.rawTermQueryList(uidField, mock(SearchExecutionContext.class), inputTerms);
-        int maxPageSize = between(1, 256);
-        var warnings = Warnings.createWarnings(DriverContext.WarningsMode.IGNORE, 0, 0, "test enrich");
-        EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, maxPageSize, queryList, reader, warnings);
-        Map<Integer, Set<Integer>> actualPositions = new HashMap<>();
-        while (queryOperator.isFinished() == false) {
-            Page page = queryOperator.getOutput();
-            if (page != null) {
-                IntVector docs = getDocVector(page, 0);
-                IntBlock positions = page.getBlock(1);
-                assertThat(positions.getPositionCount(), lessThanOrEqualTo(maxPageSize));
-                for (int i = 0; i < page.getPositionCount(); i++) {
-                    int doc = docs.getInt(i);
-                    int position = positions.getInt(i);
-                    actualPositions.computeIfAbsent(position, k -> new HashSet<>()).add(doc);
+            var queryList = QueryList.rawTermQueryList(directoryData.field, mock(SearchExecutionContext.class), inputTerms);
+            int maxPageSize = between(1, 256);
+            var warnings = Warnings.createWarnings(DriverContext.WarningsMode.IGNORE, 0, 0, "test enrich");
+            EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(
+                blockFactory,
+                maxPageSize,
+                queryList,
+                directoryData.reader,
+                warnings
+            );
+            Map<Integer, Set<Integer>> actualPositions = new HashMap<>();
+            while (queryOperator.isFinished() == false) {
+                Page page = queryOperator.getOutput();
+                if (page != null) {
+                    IntVector docs = getDocVector(page, 0);
+                    IntBlock positions = page.getBlock(1);
+                    assertThat(positions.getPositionCount(), lessThanOrEqualTo(maxPageSize));
+                    for (int i = 0; i < page.getPositionCount(); i++) {
+                        int doc = docs.getInt(i);
+                        int position = positions.getInt(i);
+                        actualPositions.computeIfAbsent(position, k -> new HashSet<>()).add(doc);
+                    }
+                    page.releaseBlocks();
                 }
-                page.releaseBlocks();
             }
+            assertThat(actualPositions, equalTo(expectedPositions));
+            IOUtils.close(inputTerms);
+        }
+    }
+
+    public void testQueries_OnlySingleValues() throws Exception {
+        try (
+            var directoryData = makeDirectoryWith(
+                List.of(List.of("a2"), List.of("a1", "c1", "b2"), List.of("a2"), List.of("a3"), List.of("b2", "b1", "a1"))
+            )
+        ) {
+            final BytesRefBlock inputTerms;
+            try (BytesRefBlock.Builder termBuilder = blockFactory.newBytesRefBlockBuilder(6)) {
+                termBuilder.appendBytesRef(new BytesRef("b2"))
+                    .beginPositionEntry()
+                    .appendBytesRef(new BytesRef("c1"))
+                    .appendBytesRef(new BytesRef("a2"))
+                    .endPositionEntry()
+                    .appendBytesRef(new BytesRef("z2"))
+                    .appendNull()
+                    .appendBytesRef(new BytesRef("a3"))
+                    .beginPositionEntry()
+                    .appendBytesRef(new BytesRef("a3"))
+                    .appendBytesRef(new BytesRef("a2"))
+                    .appendBytesRef(new BytesRef("z2"))
+                    .appendBytesRef(new BytesRef("xx"))
+                    .endPositionEntry();
+                inputTerms = termBuilder.build();
+            }
+            QueryList queryList = QueryList.rawTermQueryList(directoryData.field, mock(SearchExecutionContext.class), inputTerms)
+                .onlySingleValues();
+            // pos -> terms -> docs
+            // -----------------------------
+            // 0 -> [b2] -> [1, 4]
+            // 1 -> [c1, a2] -> []
+            // 2 -> [z2] -> []
+            // 3 -> [] -> []
+            // 4 -> [a3] -> [3]
+            // 5 -> [a3, a2, z2, xx] -> []
+            var warnings = Warnings.createWarnings(DriverContext.WarningsMode.IGNORE, 0, 0, "test lookup");
+            EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(
+                blockFactory,
+                128,
+                queryList,
+                directoryData.reader,
+                warnings
+            );
+            Page page = queryOperator.getOutput();
+            assertNotNull(page);
+            assertThat(page.getPositionCount(), equalTo(3));
+            IntVector docs = getDocVector(page, 0);
+            assertThat(docs.getInt(0), equalTo(1));
+            assertThat(docs.getInt(1), equalTo(4));
+            assertThat(docs.getInt(2), equalTo(3));
+
+            Block positions = page.getBlock(1);
+            assertThat(BlockUtils.toJavaObject(positions, 0), equalTo(0));
+            assertThat(BlockUtils.toJavaObject(positions, 1), equalTo(0));
+            assertThat(BlockUtils.toJavaObject(positions, 2), equalTo(4));
+            page.releaseBlocks();
+            assertTrue(queryOperator.isFinished());
+            IOUtils.close(inputTerms);
         }
-        assertThat(actualPositions, equalTo(expectedPositions));
-        IOUtils.close(reader, dir, inputTerms);
     }
 
     private static IntVector getDocVector(Page page, int blockIndex) {
         DocBlock doc = page.getBlock(blockIndex);
         return doc.asVector().docs();
     }
+
+    private record DirectoryData(DirectoryReader reader, MockDirectoryWrapper dir, MappedFieldType field) implements AutoCloseable {
+        @Override
+        public void close() throws IOException {
+            IOUtils.close(reader, dir);
+        }
+    }
+
+    private static DirectoryData makeDirectoryWith(List<List<String>> terms) throws IOException {
+        MockDirectoryWrapper dir = newMockDirectory();
+        IndexWriterConfig iwc = new IndexWriterConfig();
+        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+        try (IndexWriter writer = new IndexWriter(dir, iwc)) {
+            for (var termList : terms) {
+                Document doc = new Document();
+                for (String term : termList) {
+                    doc.add(new StringField("uid", term, Field.Store.NO));
+                }
+                writer.addDocument(doc);
+            }
+            writer.forceMerge(1);
+            writer.commit();
+
+            return new DirectoryData(DirectoryReader.open(writer), dir, new KeywordFieldMapper.KeywordFieldType("uid"));
+        }
+    }
 }

+ 1 - 2
x-pack/plugin/esql/compute/test/src/main/java/org/elasticsearch/compute/test/OperatorTestCase.java

@@ -241,7 +241,7 @@ public abstract class OperatorTestCase extends AnyOperatorTestCase {
      * Tests that finish then close without calling {@link Operator#getOutput} to
      * retrieve a potential last page, releases all memory.
      */
-    public void testSimpleFinishClose() {
+    public void testSimpleFinishClose() throws Exception {
         DriverContext driverContext = driverContext();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), 1));
         assert input.size() == 1 : "Expected single page, got: " + input;
@@ -253,7 +253,6 @@ public abstract class OperatorTestCase extends AnyOperatorTestCase {
             operator.addInput(page);
             operator.finish();
         }
-        assertThat(driverContext.blockFactory().breaker().getUsed(), equalTo(0L));
     }
 
     protected final List<Page> drive(Operator operator, Iterator<Page> input, DriverContext driverContext) {

+ 149 - 99
x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec

@@ -105,6 +105,21 @@ emp_no:integer | language_code:integer | language_name:keyword
 10003          | 4                     | German
 ;
 
+repeatedIndexOnFrom
+required_capability: join_lookup_v11
+
+FROM languages_lookup
+| LOOKUP JOIN languages_lookup ON language_code
+| SORT language_code
+;
+
+language_code:integer | language_name:keyword
+1                     | English
+2                     | French
+3                     | Spanish
+4                     | German
+;
+
 nonUniqueLeftKeyOnTheDataNode
 required_capability: join_lookup_v11
 
@@ -129,6 +144,10 @@ emp_no:integer | language_code:integer | language_name:keyword
 10030          |0                      | null
 ;
 
+###########################################################################
+# multiple match behavior with languages_lookup_non_unique_key index
+###########################################################################
+
 nonUniqueRightKeyOnTheDataNode
 required_capability: join_lookup_v11
 
@@ -164,6 +183,7 @@ FROM employees
 | KEEP emp_no, language_code, language_name, country
 ;
 
+ignoreOrder:true
 emp_no:integer | language_code:integer | language_name:keyword | country:text
 10001          | 1                     | English               | Canada
 10001          | 1                     | English               | null
@@ -177,6 +197,31 @@ emp_no:integer | language_code:integer | language_name:keyword | country:text
 10005          | 5                     | null                  | Atlantis
 ;
 
+nonUniqueRightKeyOnTheCoordinatorCorrectOrdering
+// Same as above, but don't ignore the order completely. At least the emp_no col must remain correctly ordered.
+required_capability: join_lookup_v11
+
+FROM employees
+| SORT emp_no
+| LIMIT 5
+| EVAL language_code = emp_no % 10
+| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
+| KEEP emp_no, language_code
+;
+
+emp_no:integer | language_code:integer
+10001          | 1
+10001          | 1
+10001          | 1
+10001          | 1
+10002          | 2
+10002          | 2
+10002          | 2
+10003          | 3
+10004          | 4
+10005          | 5
+;
+
 nonUniqueRightKeyFromRow
 required_capability: join_lookup_v11
 
@@ -185,28 +230,14 @@ ROW language_code = 2
 | DROP country.keyword
 ;
 
+ignoreOrder:true
 language_code:integer | country:text       | language_name:keyword
 2                     | [Germany, Austria] | German
 2                     | Switzerland        | German
 2                     | null               | German
 ;
 
-repeatedIndexOnFrom
-required_capability: join_lookup_v11
-
-FROM languages_lookup
-| LOOKUP JOIN languages_lookup ON language_code
-| SORT language_code
-;
-
-language_code:integer | language_name:keyword
-1                     | English
-2                     | French
-3                     | Spanish
-4                     | German
-;
-
-keepFieldNotInLookup
+keepFieldNotInLookupOnTheDataNode
 required_capability: join_lookup_v11
 
 FROM employees
@@ -214,7 +245,7 @@ FROM employees
 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code
 | WHERE emp_no == 10001
 | SORT emp_no
-| keep emp_no
+| KEEP emp_no
 ;
 
 emp_no:integer
@@ -224,12 +255,12 @@ emp_no:integer
 10001
 ;
 
-dropAllFieldsUsedInLookup
+dropAllFieldsUsedInLookupOnTheCoordinator
 required_capability: join_lookup_v11
 
 FROM employees
 | WHERE emp_no == 10001
-| keep emp_no
+| KEEP emp_no
 | EVAL language_code = emp_no % 10
 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code
 | DROP language_*, country*
@@ -242,6 +273,104 @@ emp_no:integer
 10001
 ;
 
+###########################################################################
+# null and multi-value behavior with languages_lookup_non_unique_key index
+###########################################################################
+
+nullJoinKeyOnTheDataNode
+required_capability: join_lookup_v11
+
+FROM employees
+| WHERE emp_no < 10004
+| EVAL language_code = emp_no % 10, language_code = CASE(language_code == 3, null, language_code)
+| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
+| SORT emp_no, language_code, language_name
+| KEEP emp_no, language_code, language_name
+;
+
+emp_no:integer | language_code:integer | language_name:keyword
+10001          | 1                     | English
+10001          | 1                     | English
+10001          | 1                     | English
+10001          | 1                     | null
+10002          | 2                     | German
+10002          | 2                     | German
+10002          | 2                     | German
+10003          | null                  | null
+;
+
+mvJoinKeyOnTheLookupIndex
+required_capability: join_lookup_v11
+
+FROM employees
+| WHERE 10003 < emp_no AND emp_no < 10008
+| EVAL language_code = emp_no % 10
+| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
+| SORT emp_no, language_name
+| KEEP emp_no, language_code, language_name
+;
+
+emp_no:integer | language_code:integer | language_name:keyword
+10004          | 4                     | Quenya
+10005          | 5                     | null
+10006          | 6                     | Mv-Lang
+10007          | 7                     | Mv-Lang
+10007          | 7                     | Mv-Lang2
+;
+
+mvJoinKeyOnFrom
+required_capability: join_lookup_v11
+required_capability: join_lookup_skip_mv
+
+FROM employees
+| WHERE emp_no < 10006
+| EVAL language_code = salary_change.int
+| LOOKUP JOIN languages_lookup ON language_code
+| SORT emp_no
+| KEEP emp_no, language_code, language_name
+;
+
+emp_no:integer | language_code:integer | language_name:keyword
+10001          | 1                     | English              
+10002          | [-7, 11]              | null                 
+10003          | [12, 14]              | null                 
+10004          | [0, 1, 3, 13]         | null                 
+10005          | [-2, 13]              | null
+;
+
+mvJoinKeyFromRow
+required_capability: join_lookup_v11
+required_capability: join_lookup_skip_mv
+
+ROW language_code = [4, 5, 6, 7]
+| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
+| KEEP language_code, language_name, country
+| SORT language_code, language_name, country
+;
+
+language_code:integer | language_name:keyword | country:text
+[4, 5, 6, 7]          | null                  | null
+;
+
+mvJoinKeyFromRowExpanded
+required_capability: join_lookup_v11
+
+ROW language_code = [4, 5, 6, 7, 8]
+| MV_EXPAND language_code
+| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
+| KEEP language_code, language_name, country
+| SORT language_code, language_name, country
+;
+
+language_code:integer | language_name:keyword | country:text
+4                     | Quenya                | null
+5                     | null                  | Atlantis
+6                     | Mv-Lang               | Mv-Land
+7                     | Mv-Lang               | Mv-Land
+7                     | Mv-Lang2              | Mv-Land2
+8                     | Mv-Lang2              | Mv-Land2
+;
+
 ###############################################
 # Filtering tests with languages_lookup index
 ###############################################
@@ -390,86 +519,6 @@ emp_no:integer | language_code:integer | language_name:keyword
 10092          | 1                     | English
 ;
 
-###########################################################################
-# null and multi-value behavior with languages_lookup_non_unique_key index
-###########################################################################
-
-nullJoinKeyOnTheDataNode
-required_capability: join_lookup_v11
-
-FROM employees
-| WHERE emp_no < 10004
-| EVAL language_code = emp_no % 10, language_code = CASE(language_code == 3, null, language_code)
-| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
-| SORT emp_no, language_code, language_name
-| KEEP emp_no, language_code, language_name
-;
-
-emp_no:integer | language_code:integer | language_name:keyword
-10001          | 1                     | English
-10001          | 1                     | English
-10001          | 1                     | English
-10001          | 1                     | null
-10002          | 2                     | German
-10002          | 2                     | German
-10002          | 2                     | German
-10003          | null                  | null
-;
-
-mvJoinKeyOnTheDataNode
-required_capability: join_lookup_v11
-
-FROM employees
-| WHERE 10003 < emp_no AND emp_no < 10008
-| EVAL language_code = emp_no % 10
-| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
-| SORT emp_no, language_name
-| KEEP emp_no, language_code, language_name
-;
-
-emp_no:integer | language_code:integer | language_name:keyword
-10004          | 4                     | Quenya
-10005          | 5                     | null
-10006          | 6                     | Mv-Lang
-10007          | 7                     | Mv-Lang
-10007          | 7                     | Mv-Lang2
-;
-
-mvJoinKeyFromRow
-required_capability: join_lookup_v11
-
-ROW language_code = [4, 5, 6, 7]
-| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
-| KEEP language_code, language_name, country
-| SORT language_code, language_name, country
-;
-
-language_code:integer | language_name:keyword | country:text
-[4, 5, 6, 7]          | Mv-Lang               | Mv-Land
-[4, 5, 6, 7]          | Mv-Lang2              | Mv-Land2
-[4, 5, 6, 7]          | Quenya                | null
-[4, 5, 6, 7]          | null                  | Atlantis
-;
-
-mvJoinKeyFromRowExpanded
-required_capability: join_lookup_v11
-
-ROW language_code = [4, 5, 6, 7, 8]
-| MV_EXPAND language_code
-| LOOKUP JOIN languages_lookup_non_unique_key ON language_code
-| KEEP language_code, language_name, country
-| SORT language_code, language_name, country
-;
-
-language_code:integer | language_name:keyword | country:text
-4                     | Quenya                | null
-5                     | null                  | Atlantis
-6                     | Mv-Lang               | Mv-Land
-7                     | Mv-Lang               | Mv-Land
-7                     | Mv-Lang2              | Mv-Land2
-8                     | Mv-Lang2              | Mv-Land2
-;
-
 ###########################################################################
 # nested filed join behavior with languages_nested_fields index
 ###########################################################################
@@ -1202,6 +1251,7 @@ ignoreOrder:true
 ;
 
 lookupIndexInFromRepeatedRowBug
+// Test for https://github.com/elastic/elasticsearch/issues/118852
 required_capability: join_lookup_v11
 FROM languages_lookup_non_unique_key
 | WHERE language_code == 1

+ 141 - 522
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java

@@ -220,21 +220,13 @@ public class CrossClustersQueryIT extends AbstractMultiClustersTestCase {
         }
         {
             String q = "FROM nomatch";
-            VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, false));
-            assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]"));
-
-            String limit0 = q + " | LIMIT 0";
-            e = expectThrows(VerificationException.class, () -> runQuery(limit0, false));
-            assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]"));
+            String expectedError = "Unknown index [nomatch]";
+            expectVerificationExceptionForQuery(q, expectedError, false);
         }
         {
             String q = "FROM nomatch*";
-            VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, false));
-            assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch*]"));
-
-            String limit0 = q + " | LIMIT 0";
-            e = expectThrows(VerificationException.class, () -> runQuery(limit0, false));
-            assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch*]"));
+            String expectedError = "Unknown index [nomatch*]";
+            expectVerificationExceptionForQuery(q, expectedError, false);
         }
     }
 
@@ -296,554 +288,181 @@ public class CrossClustersQueryIT extends AbstractMultiClustersTestCase {
         }
     }
 
-    public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableTrue() {
+    public void testSearchesAgainstNonMatchingIndices() {
         int numClusters = 3;
         Map<String, Object> testClusterInfo = setupClusters(numClusters);
         int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
         int remote1NumShards = (Integer) testClusterInfo.get("remote.num_shards");
-        int remote2NumShards = (Integer) testClusterInfo.get("remote2.num_shards");
         String localIndex = (String) testClusterInfo.get("local.index");
         String remote1Index = (String) testClusterInfo.get("remote.index");
         String remote2Index = (String) testClusterInfo.get("remote2.index");
 
         createIndexAliases(numClusters);
-        setSkipUnavailable(REMOTE_CLUSTER_1, true);
-        setSkipUnavailable(REMOTE_CLUSTER_2, true);
 
         Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata();
         Boolean requestIncludeMeta = includeCCSMetadata.v1();
         boolean responseExpectMeta = includeCCSMetadata.v2();
 
-        try {
-            // missing concrete local index is fatal
-            {
-                String q = "FROM nomatch,cluster-a:" + randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]"));
-            }
-
-            // missing concrete remote index is not fatal when skip_unavailable=true (as long as an index matches on another cluster)
-            {
-                String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM %s,cluster-a:nomatch", localIndexName);
-                try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, localNumShards),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0)
-                        )
-                    );
-                }
-
-                String limit0 = q + " | LIMIT 0";
-                try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
-                    assertThat(resp.columns().size(), greaterThan(0));
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0)
-                        )
-                    );
-                }
-            }
-
-            // since there is at least one matching index in the query, the missing wildcarded local index is not an error
-            {
-                String remoteIndexName = randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = "FROM nomatch*,cluster-a:" + remoteIndexName;
-                try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(
-                                REMOTE_CLUSTER_1,
-                                remoteIndexName,
-                                EsqlExecutionInfo.Cluster.Status.SUCCESSFUL,
-                                remote1NumShards
-                            )
-                        )
-                    );
-                }
-
-                String limit0 = q + " | LIMIT 0";
-                try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), equalTo(0));
-                    assertThat(resp.columns().size(), greaterThan(0));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            // LIMIT 0 searches always have total shards = 0
-                            new ExpectedCluster(REMOTE_CLUSTER_1, remoteIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)
-                        )
-                    );
-                }
-            }
+        // missing concrete local index is an error
+        {
+            String q = "FROM nomatch,cluster-a:" + remote1Index;
+            String expectedError = "Unknown index [nomatch]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
 
-            // since at least one index of the query matches on some cluster, a wildcarded index on skip_un=true is not an error
-            {
-                String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM %s,cluster-a:nomatch*", localIndexName);
-                try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, localNumShards),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0)
-                        )
-                    );
-                }
+        // missing concrete remote index is fatal
+        {
+            String q = "FROM logs*,cluster-a:nomatch";
+            String expectedError = "Unknown index [cluster-a:nomatch]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
 
-                String limit0 = q + " | LIMIT 0";
-                try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
-                    assertThat(resp.columns().size(), greaterThan(0));
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0)
+        // No error since local non-matching index has wildcard and the remote cluster index expression matches
+        {
+            String remote1IndexName = randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
+            String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_1, remote1IndexName);
+            try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
+                assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
+                EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+                assertThat(executionInfo.isCrossClusterSearch(), is(true));
+                assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
+                assertExpectedClustersForMissingIndicesTests(
+                    executionInfo,
+                    List.of(
+                        // local cluster is never marked as SKIPPED even when no matcing indices - just marked as 0 shards searched
+                        new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
+                        new ExpectedCluster(
+                            REMOTE_CLUSTER_1,
+                            remote1IndexName,
+                            EsqlExecutionInfo.Cluster.Status.SUCCESSFUL,
+                            remote1NumShards
                         )
-                    );
-                }
+                    )
+                );
             }
 
-            // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true
-            {
-                // with non-matching concrete index
-                String q = "FROM cluster-a:nomatch";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-            }
-
-            // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true and the
-            // index was wildcarded
-            {
-                // with non-matching wildcard index
-                String q = "FROM cluster-a:nomatch*";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - local with wildcard, remote with concrete
-            {
-                String q = "FROM nomatch*,cluster-a:nomatch";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - local with wildcard, remote with wildcard
-            {
-                String q = "FROM nomatch*,cluster-a:nomatch*";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]"));
+            String limit0 = q + " | LIMIT 0";
+            try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
+                assertThat(getValuesList(resp).size(), equalTo(0));
+                assertThat(resp.columns().size(), greaterThan(0));
+                EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+                assertThat(executionInfo.isCrossClusterSearch(), is(true));
+                assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
+                assertExpectedClustersForMissingIndicesTests(
+                    executionInfo,
+                    List.of(
+                        // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
+                        new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
+                        // LIMIT 0 searches always have total shards = 0
+                        new ExpectedCluster(REMOTE_CLUSTER_1, remote1IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)
+                    )
+                );
             }
+        }
 
-            // an error is thrown if there are no matching indices at all - local with concrete, remote with concrete
-            {
-                String q = "FROM nomatch,cluster-a:nomatch";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]"));
+        // No error since remote non-matching index has wildcard and the local cluster index expression matches
+        {
+            String indexLoc = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS);
+            String q = Strings.format("FROM %s,cluster-a:nomatch*", indexLoc);
 
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]"));
+            try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
+                assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
+                EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+                assertThat(executionInfo.isCrossClusterSearch(), is(true));
+                assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
+                assertExpectedClustersForMissingIndicesTests(
+                    executionInfo,
+                    List.of(
+                        // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
+                        new ExpectedCluster(LOCAL_CLUSTER, indexLoc, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, localNumShards),
+                        new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0)
+                    )
+                );
             }
 
-            // an error is thrown if there are no matching indices at all - local with concrete, remote with wildcard
-            {
-                String q = "FROM nomatch,cluster-a:nomatch*";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]"));
+            String limit0 = q + " | LIMIT 0";
+            try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
+                assertThat(getValuesList(resp).size(), equalTo(0));
+                assertThat(resp.columns().size(), greaterThan(0));
+                EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+                assertThat(executionInfo.isCrossClusterSearch(), is(true));
+                assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
+                assertExpectedClustersForMissingIndicesTests(
+                    executionInfo,
+                    List.of(
+                        // LIMIT 0 searches always have total shards = 0
+                        new ExpectedCluster(LOCAL_CLUSTER, indexLoc, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
+                        new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0)
+                    )
+                );
             }
+        }
 
-            // since cluster-a is skip_unavailable=true and at least one cluster has a matching indices, no error is thrown
-            {
-                // TODO solve in follow-on PR which does skip_unavailable handling at execution time
-                // String q = Strings.format("FROM %s,cluster-a:nomatch,cluster-a:%s*", localIndex, remote1Index);
-                // try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
-                // assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
-                // EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                // assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                // assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                // assertExpectedClustersForMissingIndicesTests(executionInfo, List.of(
-                // // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                // new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0),
-                // new ExpectedCluster(REMOTE_CLUSTER_1, "*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, remote2NumShards)
-                // ));
-                // }
-
-                // TODO: handle LIMIT 0 for this case in follow-on PR
-                // String limit0 = q + " | LIMIT 0";
-                // try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
-                // assertThat(resp.columns().size(), greaterThanOrEqualTo(1));
-                // assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0));
-                // EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                // assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                // assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                // assertExpectedClustersForMissingIndicesTests(executionInfo, List.of(
-                // // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                // new ExpectedCluster(LOCAL_CLUSTER, localIndex, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                // new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch," + remote1Index + "*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0)
-                // ));
-                // }
-            }
+        // an error is thrown if there is a concrete index that does not match
+        {
+            String q = "FROM cluster-a:nomatch";
+            String expectedError = "Unknown index [cluster-a:nomatch]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
 
-            // tests with three clusters ---
+        // an error is thrown if there are no matching indices at all - single remote cluster with wildcard index expression
+        {
+            String q = "FROM cluster-a:nomatch*";
+            String expectedError = "Unknown index [cluster-a:nomatch*]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
 
-            // since cluster-a is skip_unavailable=true and at least one cluster has a matching indices, no error is thrown
-            // cluster-a should be marked as SKIPPED with VerificationException
-            {
-                String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM nomatch*,cluster-a:nomatch,%s:%s", REMOTE_CLUSTER_2, remote2IndexName);
-                try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0),
-                            new ExpectedCluster(
-                                REMOTE_CLUSTER_2,
-                                remote2IndexName,
-                                EsqlExecutionInfo.Cluster.Status.SUCCESSFUL,
-                                remote2NumShards
-                            )
-                        )
-                    );
-                }
+        // an error is thrown if there is a concrete index that does not match
+        {
+            String q = "FROM nomatch*,cluster-a:nomatch";
+            String expectedError = "Unknown index [cluster-a:nomatch,nomatch*]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
 
-                String limit0 = q + " | LIMIT 0";
-                try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
-                    assertThat(resp.columns().size(), greaterThanOrEqualTo(1));
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_2, remote2IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)
-                        )
-                    );
-                }
-            }
+        // an error is thrown if there are no matching indices at all - local with wildcard, remote with wildcard
+        {
+            String q = "FROM nomatch*,cluster-a:nomatch*";
+            String expectedError = "Unknown index [cluster-a:nomatch*,nomatch*]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
+        {
+            String q = "FROM nomatch,cluster-a:nomatch";
+            String expectedError = "Unknown index [cluster-a:nomatch,nomatch]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
+        {
+            String q = "FROM nomatch,cluster-a:nomatch*";
+            String expectedError = "Unknown index [cluster-a:nomatch*,nomatch]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
+        }
 
-            // since cluster-a is skip_unavailable=true and at least one cluster has a matching indices, no error is thrown
-            // cluster-a should be marked as SKIPPED with a "NoMatchingIndicesException" since a wildcard index was requested
-            {
-                String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM nomatch*,cluster-a:nomatch*,%s:%s", REMOTE_CLUSTER_2, remote2IndexName);
-                try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0),
-                            new ExpectedCluster(
-                                REMOTE_CLUSTER_2,
-                                remote2IndexName,
-                                EsqlExecutionInfo.Cluster.Status.SUCCESSFUL,
-                                remote2NumShards
-                            )
-                        )
-                    );
-                }
+        // --- test against 3 clusters
 
-                String limit0 = q + " | LIMIT 0";
-                try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
-                    assertThat(resp.columns().size(), greaterThanOrEqualTo(1));
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0),
-                            new ExpectedCluster(REMOTE_CLUSTER_2, remote2IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)
-                        )
-                    );
-                }
-            }
-        } finally {
-            clearSkipUnavailable();
+        // missing concrete index (on remote) is error
+        {
+            String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS);
+            String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
+            String q = Strings.format("FROM %s*,cluster-a:nomatch,%s:%s*", localIndexName, REMOTE_CLUSTER_2, remote2IndexName);
+            String expectedError = "Unknown index [cluster-a:nomatch]";
+            expectVerificationExceptionForQuery(q, expectedError, requestIncludeMeta);
         }
     }
 
-    public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableFalse() {
-        int numClusters = 3;
-        Map<String, Object> testClusterInfo = setupClusters(numClusters);
-        int remote1NumShards = (Integer) testClusterInfo.get("remote.num_shards");
-        String localIndex = (String) testClusterInfo.get("local.index");
-        String remote1Index = (String) testClusterInfo.get("remote.index");
-        String remote2Index = (String) testClusterInfo.get("remote2.index");
-
-        createIndexAliases(numClusters);
-        setSkipUnavailable(REMOTE_CLUSTER_1, false);
-        setSkipUnavailable(REMOTE_CLUSTER_2, false);
-
-        Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata();
-        Boolean requestIncludeMeta = includeCCSMetadata.v1();
-        boolean responseExpectMeta = includeCCSMetadata.v2();
-
-        try {
-            // missing concrete local index is an error
-            {
-                String q = "FROM nomatch,cluster-a:" + remote1Index;
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]"));
-            }
-
-            // missing concrete remote index is fatal when skip_unavailable=false
-            {
-                String q = "FROM logs*,cluster-a:nomatch";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-            }
-
-            // No error since local non-matching has wildcard and the remote cluster matches
-            {
-                String remote1IndexName = randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_1, remote1IndexName);
-                try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matcing indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            new ExpectedCluster(
-                                REMOTE_CLUSTER_1,
-                                remote1IndexName,
-                                EsqlExecutionInfo.Cluster.Status.SUCCESSFUL,
-                                remote1NumShards
-                            )
-                        )
-                    );
-                }
-
-                String limit0 = q + " | LIMIT 0";
-                try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) {
-                    assertThat(getValuesList(resp).size(), equalTo(0));
-                    assertThat(resp.columns().size(), greaterThan(0));
-                    EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
-                    assertThat(executionInfo.isCrossClusterSearch(), is(true));
-                    assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
-                    assertExpectedClustersForMissingIndicesTests(
-                        executionInfo,
-                        List.of(
-                            // local cluster is never marked as SKIPPED even when no matcing indices - just marked as 0 shards searched
-                            new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0),
-                            // LIMIT 0 searches always have total shards = 0
-                            new ExpectedCluster(REMOTE_CLUSTER_1, remote1IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)
-                        )
-                    );
-                }
-            }
-
-            // query is fatal since cluster-a has skip_unavailable=false and has no matching indices
-            {
-                String q = Strings.format("FROM %s,cluster-a:nomatch*", randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS));
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - single remote cluster with concrete index expression
-            {
-                String q = "FROM cluster-a:nomatch";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - single remote cluster with wildcard index expression
-            {
-                String q = "FROM cluster-a:nomatch*";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - local with wildcard, remote with concrete
-            {
-                String q = "FROM nomatch*,cluster-a:nomatch";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - local with wildcard, remote with wildcard
-            {
-                String q = "FROM nomatch*,cluster-a:nomatch*";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - local with concrete, remote with concrete
-            {
-                String q = "FROM nomatch,cluster-a:nomatch";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]"));
-            }
-
-            // an error is thrown if there are no matching indices at all - local with concrete, remote with wildcard
-            {
-                String q = "FROM nomatch,cluster-a:nomatch*";
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]"));
-            }
-
-            // Missing concrete index on skip_unavailable=false cluster is a fatal error, even when another index expression
-            // against that cluster matches
-            {
-                String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM %s,cluster-a:nomatch,cluster-a:%s*", localIndex, remote2IndexName);
-                IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("no such index [nomatch]"));
-
-                // TODO: in follow on PR, add support for throwing a VerificationException from this scenario
-                // String limit0 = q + " | LIMIT 0";
-                // VerificationException e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                // assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]"));
-            }
-
-            // --- test against 3 clusters
-
-            // skip_unavailable=false cluster having no matching indices is a fatal error. This error
-            // is fatal at plan time, so it throws VerificationException, not IndexNotFoundException (thrown at execution time)
-            {
-                String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM %s*,cluster-a:nomatch,%s:%s*", localIndexName, REMOTE_CLUSTER_2, remote2IndexName);
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]"));
-            }
+    record ExpectedCluster(String clusterAlias, String indexExpression, EsqlExecutionInfo.Cluster.Status status, Integer totalShards) {}
 
-            // skip_unavailable=false cluster having no matching indices is a fatal error (even if wildcarded)
-            {
-                String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS);
-                String q = Strings.format("FROM %s*,cluster-a:nomatch*,%s:%s*", localIndexName, REMOTE_CLUSTER_2, remote2IndexName);
-                VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
+    /**
+     * Runs the provided query, expecting a VerificationError. It then runs the same query with a "| LIMIT 0"
+     * extra processing step to ensure that ESQL coordinator-only operations throw the same VerificationError.
+     */
+    private void expectVerificationExceptionForQuery(String query, String error, Boolean requestIncludeMeta) {
+        VerificationException e = expectThrows(VerificationException.class, () -> runQuery(query, requestIncludeMeta));
+        assertThat(e.getDetailedMessage(), containsString(error));
 
-                String limit0 = q + " | LIMIT 0";
-                e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
-                assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]"));
-            }
-        } finally {
-            clearSkipUnavailable();
-        }
+        String limit0 = query + " | LIMIT 0";
+        e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta));
+        assertThat(e.getDetailedMessage(), containsString(error));
     }
 
-    record ExpectedCluster(String clusterAlias, String indexExpression, EsqlExecutionInfo.Cluster.Status status, Integer totalShards) {}
-
     public void assertExpectedClustersForMissingIndicesTests(EsqlExecutionInfo executionInfo, List<ExpectedCluster> expected) {
         long overallTookMillis = executionInfo.overallTook().millis();
         assertThat(overallTookMillis, greaterThanOrEqualTo(0L));

+ 7 - 10
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java

@@ -91,35 +91,32 @@ public class CrossClustersUsageTelemetryIT extends AbstractCrossClustersUsageTel
         assertThat(telemetry.getSuccessCount(), equalTo(0L));
         assertThat(telemetry.getByRemoteCluster().size(), equalTo(0));
 
-        // One remote is skipped, one is not
+        // Errors from both remotes
         telemetry = getTelemetryFromFailedQuery("from logs-*,c*:no_such_index | stats sum (v)");
 
         assertThat(telemetry.getTotalCount(), equalTo(1L));
         assertThat(telemetry.getSuccessCount(), equalTo(0L));
-        assertThat(telemetry.getByRemoteCluster().size(), equalTo(1));
+        assertThat(telemetry.getByRemoteCluster().size(), equalTo(0));
         assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0));
         assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L));
-        assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L));
+        assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L));
         Map<String, Long> expectedFailure = Map.of(CCSUsageTelemetry.Result.NOT_FOUND.getName(), 1L);
         assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure));
-        // cluster-b should be skipped
-        assertThat(telemetry.getByRemoteCluster().get(REMOTE2).getCount(), equalTo(0L));
-        assertThat(telemetry.getByRemoteCluster().get(REMOTE2).getSkippedCount(), equalTo(1L));
 
         // this is only for cluster-a so no skipped remotes
         telemetry = getTelemetryFromFailedQuery("from logs-*,cluster-a:no_such_index | stats sum (v)");
         assertThat(telemetry.getTotalCount(), equalTo(2L));
         assertThat(telemetry.getSuccessCount(), equalTo(0L));
-        assertThat(telemetry.getByRemoteCluster().size(), equalTo(1));
+        assertThat(telemetry.getByRemoteCluster().size(), equalTo(0));
         assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0));
         assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L));
-        assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L));
+        assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L));
         expectedFailure = Map.of(CCSUsageTelemetry.Result.NOT_FOUND.getName(), 2L);
         assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure));
-        assertThat(telemetry.getByRemoteCluster().size(), equalTo(1));
+        assertThat(telemetry.getByRemoteCluster().size(), equalTo(0));
     }
 
-    // TODO: enable when skip-up patch is merged
+    // TODO: enable when skip-un patch is merged
     // public void testSkipAllRemotes() throws Exception {
     // var telemetry = getTelemetryFromQuery("from logs-*,c*:no_such_index | stats sum (v)", "unknown");
     //

+ 2 - 1
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java

@@ -210,12 +210,13 @@ public class LookupFromIndexIT extends AbstractEsqlIntegTestCase {
                 new AsyncExecutionId("test", TaskId.EMPTY_TASK_ID),
                 TEST_REQUEST_TIMEOUT
             );
+            final String finalNodeWithShard = nodeWithShard;
             LookupFromIndexOperator.Factory lookup = new LookupFromIndexOperator.Factory(
                 "test",
                 parentTask,
                 QueryPragmas.ENRICH_MAX_WORKERS.get(Settings.EMPTY),
                 1,
-                internalCluster().getInstance(TransportEsqlQueryAction.class, nodeWithShard).getLookupFromIndexService(),
+                ctx -> internalCluster().getInstance(TransportEsqlQueryAction.class, finalNodeWithShard).getLookupFromIndexService(),
                 DataType.KEYWORD,
                 "lookup",
                 "data",

+ 5 - 0
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java

@@ -690,6 +690,11 @@ public class EsqlCapabilities {
          */
         JOIN_LOOKUP_V11(Build.current().isSnapshot()),
 
+        /**
+         * LOOKUP JOIN without MV matching (https://github.com/elastic/elasticsearch/issues/118780)
+         */
+        JOIN_LOOKUP_SKIP_MV(JOIN_LOOKUP_V11.isEnabled()),
+
         /**
          * Fix for https://github.com/elastic/elasticsearch/issues/117054
          */

+ 6 - 6
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java

@@ -133,7 +133,7 @@ import java.util.stream.IntStream;
 public abstract class AbstractLookupService<R extends AbstractLookupService.Request, T extends AbstractLookupService.TransportRequest> {
     private final String actionName;
     private final ClusterService clusterService;
-    private final CreateShardContext createShardContext;
+    private final LookupShardContextFactory lookupShardContextFactory;
     private final TransportService transportService;
     private final Executor executor;
     private final BigArrays bigArrays;
@@ -152,7 +152,7 @@ public abstract class AbstractLookupService<R extends AbstractLookupService.Requ
     AbstractLookupService(
         String actionName,
         ClusterService clusterService,
-        CreateShardContext createShardContext,
+        LookupShardContextFactory lookupShardContextFactory,
         TransportService transportService,
         BigArrays bigArrays,
         BlockFactory blockFactory,
@@ -161,7 +161,7 @@ public abstract class AbstractLookupService<R extends AbstractLookupService.Requ
     ) {
         this.actionName = actionName;
         this.clusterService = clusterService;
-        this.createShardContext = createShardContext;
+        this.lookupShardContextFactory = lookupShardContextFactory;
         this.transportService = transportService;
         this.executor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH);
         this.bigArrays = bigArrays;
@@ -324,7 +324,7 @@ public abstract class AbstractLookupService<R extends AbstractLookupService.Requ
         final List<Releasable> releasables = new ArrayList<>(6);
         boolean started = false;
         try {
-            LookupShardContext shardContext = createShardContext.create(request.shardId);
+            LookupShardContext shardContext = lookupShardContextFactory.create(request.shardId);
             releasables.add(shardContext.release);
             final LocalCircuitBreaker localBreaker = new LocalCircuitBreaker(
                 blockFactory.breaker(),
@@ -677,10 +677,10 @@ public abstract class AbstractLookupService<R extends AbstractLookupService.Requ
     /**
      * Create a {@link LookupShardContext} for a locally allocated {@link ShardId}.
      */
-    public interface CreateShardContext {
+    public interface LookupShardContextFactory {
         LookupShardContext create(ShardId shardId) throws IOException;
 
-        static CreateShardContext fromSearchService(SearchService searchService) {
+        static LookupShardContextFactory fromSearchService(SearchService searchService) {
             return shardId -> {
                 ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY);
                 return LookupShardContext.fromSearchContext(

+ 2 - 2
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java

@@ -47,7 +47,7 @@ public class EnrichLookupService extends AbstractLookupService<EnrichLookupServi
 
     public EnrichLookupService(
         ClusterService clusterService,
-        CreateShardContext createShardContext,
+        LookupShardContextFactory lookupShardContextFactory,
         TransportService transportService,
         BigArrays bigArrays,
         BlockFactory blockFactory
@@ -55,7 +55,7 @@ public class EnrichLookupService extends AbstractLookupService<EnrichLookupServi
         super(
             LOOKUP_ACTION_NAME,
             clusterService,
-            createShardContext,
+            lookupShardContextFactory,
             transportService,
             bigArrays,
             blockFactory,

+ 5 - 2
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java

@@ -32,6 +32,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
 import java.util.Optional;
+import java.util.function.Function;
 
 // TODO rename package
 public final class LookupFromIndexOperator extends AsyncOperator<LookupFromIndexOperator.OngoingJoin> {
@@ -40,7 +41,7 @@ public final class LookupFromIndexOperator extends AsyncOperator<LookupFromIndex
         CancellableTask parentTask,
         int maxOutstandingRequests,
         int inputChannel,
-        LookupFromIndexService lookupService,
+        Function<DriverContext, LookupFromIndexService> lookupService,
         DataType inputDataType,
         String lookupIndex,
         String matchField,
@@ -51,6 +52,8 @@ public final class LookupFromIndexOperator extends AsyncOperator<LookupFromIndex
         public String describe() {
             return "LookupOperator[index="
                 + lookupIndex
+                + " input_type="
+                + inputDataType
                 + " match_field="
                 + matchField
                 + " load_fields="
@@ -68,7 +71,7 @@ public final class LookupFromIndexOperator extends AsyncOperator<LookupFromIndex
                 parentTask,
                 maxOutstandingRequests,
                 inputChannel,
-                lookupService,
+                lookupService.apply(driverContext),
                 inputDataType,
                 lookupIndex,
                 matchField,

+ 3 - 3
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java

@@ -46,7 +46,7 @@ public class LookupFromIndexService extends AbstractLookupService<LookupFromInde
 
     public LookupFromIndexService(
         ClusterService clusterService,
-        CreateShardContext createShardContext,
+        LookupShardContextFactory lookupShardContextFactory,
         TransportService transportService,
         BigArrays bigArrays,
         BlockFactory blockFactory
@@ -54,7 +54,7 @@ public class LookupFromIndexService extends AbstractLookupService<LookupFromInde
         super(
             LOOKUP_ACTION_NAME,
             clusterService,
-            createShardContext,
+            lookupShardContextFactory,
             transportService,
             bigArrays,
             blockFactory,
@@ -81,7 +81,7 @@ public class LookupFromIndexService extends AbstractLookupService<LookupFromInde
     protected QueryList queryList(TransportRequest request, SearchExecutionContext context, Block inputBlock, DataType inputDataType) {
         MappedFieldType fieldType = context.getFieldType(request.matchField);
         validateTypes(request.inputDataType, fieldType);
-        return termQueryList(fieldType, context, inputBlock, inputDataType);
+        return termQueryList(fieldType, context, inputBlock, inputDataType).onlySingleValues();
     }
 
     @Override

+ 1 - 1
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java

@@ -589,7 +589,7 @@ public class LocalExecutionPlanner {
                 parentTask,
                 context.queryPragmas().enrichMaxWorkers(),
                 matchFields.getFirst().channel(),
-                lookupFromIndexService,
+                ctx -> lookupFromIndexService,
                 matchFields.getFirst().type(),
                 indexName,
                 join.leftFields().getFirst().name(),

+ 1 - 1
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java

@@ -146,7 +146,7 @@ public class PlannerUtils {
     }
 
     /**
-     * Similar to {@link Node#forEachUp(Consumer)}, but with a custom callback to get the node children.
+     * Similar to {@link Node#forEachUp(Class, Consumer)}, but with a custom callback to get the node children.
      */
     private static <T extends Node<T>, E extends T> void forEachUpWithChildren(
         T node,

+ 96 - 89
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeHandler.java

@@ -11,10 +11,10 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.support.ChannelActionListener;
-import org.elasticsearch.compute.EsqlRefCountingListener;
 import org.elasticsearch.compute.operator.exchange.ExchangeService;
 import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler;
 import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskCancelledException;
@@ -25,17 +25,17 @@ import org.elasticsearch.transport.TransportChannel;
 import org.elasticsearch.transport.TransportRequestHandler;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo;
 import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec;
 import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
 import org.elasticsearch.xpack.esql.session.Configuration;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicReference;
 
 /**
  * Manages computes across multiple clusters by sending {@link ClusterComputeRequest} to remote clusters and executing the computes.
@@ -63,46 +63,54 @@ final class ClusterComputeHandler implements TransportRequestHandler<ClusterComp
         transportService.registerRequestHandler(ComputeService.CLUSTER_ACTION_NAME, esqlExecutor, ClusterComputeRequest::new, this);
     }
 
-    void startComputeOnRemoteClusters(
+    void startComputeOnRemoteCluster(
         String sessionId,
         CancellableTask rootTask,
         Configuration configuration,
         PhysicalPlan plan,
         ExchangeSourceHandler exchangeSource,
-        List<RemoteCluster> clusters,
-        ComputeListener computeListener
+        RemoteCluster cluster,
+        Runnable cancelQueryOnFailure,
+        ActionListener<ComputeResponse> listener
     ) {
         var queryPragmas = configuration.pragmas();
-        var linkExchangeListeners = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink());
-        try (EsqlRefCountingListener refs = new EsqlRefCountingListener(linkExchangeListeners)) {
-            for (RemoteCluster cluster : clusters) {
-                final var childSessionId = computeService.newChildSession(sessionId);
-                ExchangeService.openExchange(
-                    transportService,
-                    cluster.connection,
-                    childSessionId,
-                    queryPragmas.exchangeBufferSize(),
-                    esqlExecutor,
-                    refs.acquire().delegateFailureAndWrap((l, unused) -> {
-                        var remoteSink = exchangeService.newRemoteSink(rootTask, childSessionId, transportService, cluster.connection);
-                        exchangeSource.addRemoteSink(remoteSink, true, queryPragmas.concurrentExchangeClients(), ActionListener.noop());
-                        var remotePlan = new RemoteClusterPlan(plan, cluster.concreteIndices, cluster.originalIndices);
-                        var clusterRequest = new ClusterComputeRequest(cluster.clusterAlias, childSessionId, configuration, remotePlan);
-                        var clusterListener = ActionListener.runBefore(
-                            computeListener.acquireCompute(cluster.clusterAlias()),
-                            () -> l.onResponse(null)
-                        );
-                        transportService.sendChildRequest(
-                            cluster.connection,
-                            ComputeService.CLUSTER_ACTION_NAME,
-                            clusterRequest,
-                            rootTask,
-                            TransportRequestOptions.EMPTY,
-                            new ActionListenerResponseHandler<>(clusterListener, ComputeResponse::new, esqlExecutor)
-                        );
-                    })
-                );
-            }
+        listener = ActionListener.runBefore(listener, exchangeSource.addEmptySink()::close);
+        final var childSessionId = computeService.newChildSession(sessionId);
+        final AtomicReference<ComputeResponse> finalResponse = new AtomicReference<>();
+        try (var computeListener = new ComputeListener(transportService.getThreadPool(), cancelQueryOnFailure, listener.map(profiles -> {
+            var resp = finalResponse.get();
+            return Objects.requireNonNullElseGet(resp, () -> new ComputeResponse(profiles));
+        }))) {
+            ExchangeService.openExchange(
+                transportService,
+                cluster.connection,
+                childSessionId,
+                queryPragmas.exchangeBufferSize(),
+                esqlExecutor,
+                computeListener.acquireCompute().delegateFailureAndWrap((l, unused) -> {
+                    var remoteSink = exchangeService.newRemoteSink(rootTask, childSessionId, transportService, cluster.connection);
+                    exchangeSource.addRemoteSink(
+                        remoteSink,
+                        true,
+                        queryPragmas.concurrentExchangeClients(),
+                        computeListener.acquireAvoid()
+                    );
+                    var remotePlan = new RemoteClusterPlan(plan, cluster.concreteIndices, cluster.originalIndices);
+                    var clusterRequest = new ClusterComputeRequest(cluster.clusterAlias, childSessionId, configuration, remotePlan);
+                    final ActionListener<ComputeResponse> clusterListener = l.map(r -> {
+                        finalResponse.set(r);
+                        return r.getProfiles();
+                    });
+                    transportService.sendChildRequest(
+                        cluster.connection,
+                        ComputeService.CLUSTER_ACTION_NAME,
+                        clusterRequest,
+                        rootTask,
+                        TransportRequestOptions.EMPTY,
+                        new ActionListenerResponseHandler<>(clusterListener, ComputeResponse::new, esqlExecutor)
+                    );
+                })
+            );
         }
     }
 
@@ -141,28 +149,16 @@ final class ClusterComputeHandler implements TransportRequestHandler<ClusterComp
             listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + plan));
             return;
         }
-        String clusterAlias = request.clusterAlias();
-        /*
-         * This handler runs only on remote cluster coordinators, so it creates a new local EsqlExecutionInfo object to record
-         * execution metadata for ES|QL processing local to this cluster. The execution info will be copied into the
-         * ComputeResponse that is sent back to the primary coordinating cluster.
-         */
-        EsqlExecutionInfo execInfo = new EsqlExecutionInfo(true);
-        execInfo.swapCluster(clusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(clusterAlias, Arrays.toString(request.indices())));
-        CancellableTask cancellable = (CancellableTask) task;
-        try (var computeListener = ComputeListener.create(clusterAlias, transportService, cancellable, execInfo, listener)) {
-            runComputeOnRemoteCluster(
-                clusterAlias,
-                request.sessionId(),
-                (CancellableTask) task,
-                request.configuration(),
-                (ExchangeSinkExec) plan,
-                Set.of(remoteClusterPlan.targetIndices()),
-                remoteClusterPlan.originalIndices(),
-                execInfo,
-                computeListener
-            );
-        }
+        runComputeOnRemoteCluster(
+            request.clusterAlias(),
+            request.sessionId(),
+            (CancellableTask) task,
+            request.configuration(),
+            (ExchangeSinkExec) plan,
+            Set.of(remoteClusterPlan.targetIndices()),
+            remoteClusterPlan.originalIndices(),
+            listener
+        );
     }
 
     /**
@@ -182,8 +178,7 @@ final class ClusterComputeHandler implements TransportRequestHandler<ClusterComp
         ExchangeSinkExec plan,
         Set<String> concreteIndices,
         OriginalIndices originalIndices,
-        EsqlExecutionInfo executionInfo,
-        ComputeListener computeListener
+        ActionListener<ComputeResponse> listener
     ) {
         final var exchangeSink = exchangeService.getSinkHandler(globalSessionId);
         parentTask.addListener(
@@ -191,39 +186,51 @@ final class ClusterComputeHandler implements TransportRequestHandler<ClusterComp
         );
         final String localSessionId = clusterAlias + ":" + globalSessionId;
         final PhysicalPlan coordinatorPlan = ComputeService.reductionPlan(plan, true);
-        var exchangeSource = new ExchangeSourceHandler(
-            configuration.pragmas().exchangeBufferSize(),
-            transportService.getThreadPool().executor(ThreadPool.Names.SEARCH),
-            computeListener.acquireAvoid()
-        );
-        try (Releasable ignored = exchangeSource.addEmptySink()) {
-            exchangeSink.addCompletionListener(computeListener.acquireAvoid());
-            computeService.runCompute(
-                parentTask,
-                new ComputeContext(
+        final AtomicReference<ComputeResponse> finalResponse = new AtomicReference<>();
+        final long startTimeInNanos = System.nanoTime();
+        final Runnable cancelQueryOnFailure = computeService.cancelQueryOnFailure(parentTask);
+        try (var computeListener = new ComputeListener(transportService.getThreadPool(), cancelQueryOnFailure, listener.map(profiles -> {
+            final TimeValue took = TimeValue.timeValueNanos(System.nanoTime() - startTimeInNanos);
+            final ComputeResponse r = finalResponse.get();
+            return new ComputeResponse(profiles, took, r.totalShards, r.successfulShards, r.skippedShards, r.failedShards);
+        }))) {
+            var exchangeSource = new ExchangeSourceHandler(
+                configuration.pragmas().exchangeBufferSize(),
+                transportService.getThreadPool().executor(ThreadPool.Names.SEARCH),
+                computeListener.acquireAvoid()
+            );
+            try (Releasable ignored = exchangeSource.addEmptySink()) {
+                exchangeSink.addCompletionListener(computeListener.acquireAvoid());
+                computeService.runCompute(
+                    parentTask,
+                    new ComputeContext(
+                        localSessionId,
+                        clusterAlias,
+                        List.of(),
+                        configuration,
+                        configuration.newFoldContext(),
+                        exchangeSource,
+                        exchangeSink
+                    ),
+                    coordinatorPlan,
+                    computeListener.acquireCompute()
+                );
+                dataNodeComputeHandler.startComputeOnDataNodes(
                     localSessionId,
                     clusterAlias,
-                    List.of(),
+                    parentTask,
                     configuration,
-                    configuration.newFoldContext(),
+                    plan,
+                    concreteIndices,
+                    originalIndices,
                     exchangeSource,
-                    exchangeSink
-                ),
-                coordinatorPlan,
-                computeListener.acquireCompute(clusterAlias)
-            );
-            dataNodeComputeHandler.startComputeOnDataNodes(
-                localSessionId,
-                clusterAlias,
-                parentTask,
-                configuration,
-                plan,
-                concreteIndices,
-                originalIndices,
-                exchangeSource,
-                executionInfo,
-                computeListener
-            );
+                    cancelQueryOnFailure,
+                    computeListener.acquireCompute().map(r -> {
+                        finalResponse.set(r);
+                        return r.getProfiles();
+                    })
+                );
+            }
         }
     }
 

+ 15 - 225
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java

@@ -12,189 +12,44 @@ import org.elasticsearch.action.support.RefCountingListener;
 import org.elasticsearch.compute.EsqlRefCountingListener;
 import org.elasticsearch.compute.operator.DriverProfile;
 import org.elasticsearch.compute.operator.ResponseHeadersCollector;
-import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasable;
-import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.logging.LogManager;
-import org.elasticsearch.logging.Logger;
-import org.elasticsearch.tasks.CancellableTask;
-import org.elasticsearch.transport.RemoteClusterAware;
-import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo;
+import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * A variant of {@link RefCountingListener} with the following differences:
- * 1. Automatically cancels sub tasks on failure.
+ * 1. Automatically cancels sub tasks on failure (via runOnTaskFailure)
  * 2. Collects driver profiles from sub tasks.
  * 3. Collects response headers from sub tasks, specifically warnings emitted during compute
  * 4. Collects failures and returns the most appropriate exception to the caller.
- * 5. Updates {@link EsqlExecutionInfo} for display in the response for cross-cluster searches
  */
 final class ComputeListener implements Releasable {
-    private static final Logger LOGGER = LogManager.getLogger(ComputeService.class);
-
     private final EsqlRefCountingListener refs;
-    private final AtomicBoolean cancelled = new AtomicBoolean();
-    private final CancellableTask task;
-    private final TransportService transportService;
     private final List<DriverProfile> collectedProfiles;
     private final ResponseHeadersCollector responseHeaders;
-    private final EsqlExecutionInfo esqlExecutionInfo;
-    // clusterAlias indicating where this ComputeListener is running
-    // used by the top level ComputeListener in ComputeService on both local and remote clusters
-    private final String whereRunning;
-
-    /**
-     * Create a ComputeListener that does not need to gather any metadata in EsqlExecutionInfo
-     * (currently that's the ComputeListener in DataNodeRequestHandler).
-     */
-    public static ComputeListener create(
-        TransportService transportService,
-        CancellableTask task,
-        ActionListener<ComputeResponse> delegate
-    ) {
-        return new ComputeListener(transportService, task, null, null, delegate);
-    }
+    private final Runnable runOnFailure;
 
-    /**
-     * Create a ComputeListener that gathers metadata in EsqlExecutionInfo
-     * (currently that's the top level ComputeListener in ComputeService).
-     * @param clusterAlias the clusterAlias where this ComputeListener is running. For the querying cluster, use
-     *                     RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY. For remote clusters that are part of a CCS,
-     *                     the remote cluster is given its clusterAlias in the request sent to it, so that should be
-     *                     passed in here. This gives context to the ComputeListener as to where this listener is running
-     *                     and thus how it should behave with respect to the {@link EsqlExecutionInfo} metadata it gathers.
-     * @param transportService
-     * @param task
-     * @param executionInfo {@link EsqlExecutionInfo} to capture execution metadata
-     * @param delegate
-     */
-    public static ComputeListener create(
-        String clusterAlias,
-        TransportService transportService,
-        CancellableTask task,
-        EsqlExecutionInfo executionInfo,
-        ActionListener<ComputeResponse> delegate
-    ) {
-        return new ComputeListener(transportService, task, clusterAlias, executionInfo, delegate);
-    }
-
-    private ComputeListener(
-        TransportService transportService,
-        CancellableTask task,
-        String clusterAlias,
-        EsqlExecutionInfo executionInfo,
-        ActionListener<ComputeResponse> delegate
-    ) {
-        this.transportService = transportService;
-        this.task = task;
-        this.responseHeaders = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext());
+    ComputeListener(ThreadPool threadPool, Runnable runOnFailure, ActionListener<List<DriverProfile>> delegate) {
+        this.runOnFailure = runOnFailure;
+        this.responseHeaders = new ResponseHeadersCollector(threadPool.getThreadContext());
         this.collectedProfiles = Collections.synchronizedList(new ArrayList<>());
-        this.esqlExecutionInfo = executionInfo;
-        this.whereRunning = clusterAlias;
-        // for the DataNodeHandler ComputeListener, clusterAlias and executionInfo will be null
-        // for the top level ComputeListener in ComputeService both will be non-null
-        assert (clusterAlias == null && executionInfo == null) || (clusterAlias != null && executionInfo != null)
-            : "clusterAlias and executionInfo must both be null or both non-null";
-
         // listener that executes after all the sub-listeners refs (created via acquireCompute) have completed
         this.refs = new EsqlRefCountingListener(delegate.delegateFailure((l, ignored) -> {
             responseHeaders.finish();
-            ComputeResponse result;
-
-            if (runningOnRemoteCluster()) {
-                // for remote executions - this ComputeResponse is created on the remote cluster/node and will be serialized and
-                // received by the acquireCompute method callback on the coordinating cluster
-                setFinalStatusAndShardCounts(clusterAlias, executionInfo);
-                EsqlExecutionInfo.Cluster cluster = esqlExecutionInfo.getCluster(clusterAlias);
-                result = new ComputeResponse(
-                    collectedProfiles.isEmpty() ? List.of() : collectedProfiles.stream().toList(),
-                    cluster.getTook(),
-                    cluster.getTotalShards(),
-                    cluster.getSuccessfulShards(),
-                    cluster.getSkippedShards(),
-                    cluster.getFailedShards()
-                );
-            } else {
-                result = new ComputeResponse(collectedProfiles.isEmpty() ? List.of() : collectedProfiles.stream().toList());
-                if (coordinatingClusterIsSearchedInCCS()) {
-                    // if not already marked as SKIPPED, mark the local cluster as finished once the coordinator and all
-                    // data nodes have finished processing
-                    setFinalStatusAndShardCounts(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, executionInfo);
-                }
-            }
-            delegate.onResponse(result);
+            delegate.onResponse(collectedProfiles.stream().toList());
         }));
     }
 
-    private static void setFinalStatusAndShardCounts(String clusterAlias, EsqlExecutionInfo executionInfo) {
-        executionInfo.swapCluster(clusterAlias, (k, v) -> {
-            // TODO: once PARTIAL status is supported (partial results work to come), modify this code as needed
-            if (v.getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) {
-                assert v.getTotalShards() != null && v.getSkippedShards() != null : "Null total or skipped shard count: " + v;
-                return new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)
-                    /*
-                     * Total and skipped shard counts are set early in execution (after can-match).
-                     * Until ES|QL supports shard-level partial results, we just set all non-skipped shards
-                     * as successful and none are failed.
-                     */
-                    .setSuccessfulShards(v.getTotalShards())
-                    .setFailedShards(0)
-                    .build();
-            } else {
-                return v;
-            }
-        });
-    }
-
-    /**
-     * @return true if the "local" querying/coordinator cluster is being searched in a cross-cluster search
-     */
-    private boolean coordinatingClusterIsSearchedInCCS() {
-        return esqlExecutionInfo != null
-            && esqlExecutionInfo.isCrossClusterSearch()
-            && esqlExecutionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) != null;
-    }
-
-    /**
-     * @return true if this Listener is running on a remote cluster (i.e., not the querying cluster)
-     */
-    private boolean runningOnRemoteCluster() {
-        return whereRunning != null && whereRunning.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false;
-    }
-
-    /**
-     * @return true if the listener is in a context where the took time needs to be recorded into the EsqlExecutionInfo
-     */
-    private boolean shouldRecordTookTime() {
-        return runningOnRemoteCluster() || coordinatingClusterIsSearchedInCCS();
-    }
-
-    /**
-     * @param computeClusterAlias the clusterAlias passed to the acquireCompute method
-     * @return true if this listener is waiting for a remote response in a CCS search
-     */
-    private boolean isCCSListener(String computeClusterAlias) {
-        return RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(whereRunning)
-            && computeClusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false;
-    }
-
     /**
      * Acquires a new listener that doesn't collect result
      */
     ActionListener<Void> acquireAvoid() {
         return refs.acquire().delegateResponse((l, e) -> {
             try {
-                if (cancelled.compareAndSet(false, true)) {
-                    LOGGER.debug("cancelling ESQL task {} on failure", task);
-                    transportService.getTaskManager().cancelTaskAndDescendants(task, "cancelled on failure", false, ActionListener.noop());
-                }
+                runOnFailure.run();
             } finally {
                 l.onFailure(e);
             }
@@ -203,86 +58,21 @@ final class ComputeListener implements Releasable {
 
     /**
      * Acquires a new listener that collects compute result. This listener will also collect warnings emitted during compute
-     * @param computeClusterAlias The cluster alias where the compute is happening. Used when metadata needs to be gathered
-     *                            into the {@link EsqlExecutionInfo} Cluster objects. Callers that do not required execution
-     *                            info to be gathered (namely, the DataNodeRequestHandler ComputeListener) should pass in null.
      */
-    ActionListener<ComputeResponse> acquireCompute(@Nullable String computeClusterAlias) {
-        assert computeClusterAlias == null || (esqlExecutionInfo != null && esqlExecutionInfo.getRelativeStartNanos() != null)
-            : "When clusterAlias is provided to acquireCompute, executionInfo and relativeStartTimeNanos must be non-null";
-
-        return acquireAvoid().map(resp -> {
+    ActionListener<List<DriverProfile>> acquireCompute() {
+        final ActionListener<Void> delegate = acquireAvoid();
+        return ActionListener.wrap(profiles -> {
             responseHeaders.collect();
-            var profiles = resp.getProfiles();
             if (profiles != null && profiles.isEmpty() == false) {
                 collectedProfiles.addAll(profiles);
             }
-            if (computeClusterAlias == null) {
-                return null;
-            }
-            if (isCCSListener(computeClusterAlias)) {
-                // this is the callback for the listener on the primary coordinator that receives a remote ComputeResponse
-                updateExecutionInfoWithRemoteResponse(computeClusterAlias, resp);
-
-            } else if (shouldRecordTookTime()) {
-                Long relativeStartNanos = esqlExecutionInfo.getRelativeStartNanos();
-                // handler for this cluster's data node and coordinator completion (runs on "local" and remote clusters)
-                assert relativeStartNanos != null : "queryStartTimeNanos not set properly";
-                TimeValue tookTime = new TimeValue(System.nanoTime() - relativeStartNanos, TimeUnit.NANOSECONDS);
-                esqlExecutionInfo.swapCluster(computeClusterAlias, (k, v) -> {
-                    if (v.getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED
-                        && (v.getTook() == null || v.getTook().nanos() < tookTime.nanos())) {
-                        return new EsqlExecutionInfo.Cluster.Builder(v).setTook(tookTime).build();
-                    } else {
-                        return v;
-                    }
-                });
-            }
-            return null;
+            delegate.onResponse(null);
+        }, e -> {
+            responseHeaders.collect();
+            delegate.onFailure(e);
         });
     }
 
-    private void updateExecutionInfoWithRemoteResponse(String computeClusterAlias, ComputeResponse resp) {
-        TimeValue tookOnCluster;
-        if (resp.getTook() != null) {
-            TimeValue remoteExecutionTime = resp.getTook();
-            TimeValue planningTookTime = esqlExecutionInfo.planningTookTime();
-            tookOnCluster = new TimeValue(planningTookTime.nanos() + remoteExecutionTime.nanos(), TimeUnit.NANOSECONDS);
-            esqlExecutionInfo.swapCluster(
-                computeClusterAlias,
-                (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v)
-                    // for now ESQL doesn't return partial results, so set status to SUCCESSFUL
-                    .setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)
-                    .setTook(tookOnCluster)
-                    .setTotalShards(resp.getTotalShards())
-                    .setSuccessfulShards(resp.getSuccessfulShards())
-                    .setSkippedShards(resp.getSkippedShards())
-                    .setFailedShards(resp.getFailedShards())
-                    .build()
-            );
-        } else {
-            // if the cluster is an older version and does not send back took time, then calculate it here on the coordinator
-            // and leave shard info unset, so it is not shown in the CCS metadata section of the JSON response
-            long remoteTook = System.nanoTime() - esqlExecutionInfo.getRelativeStartNanos();
-            tookOnCluster = new TimeValue(remoteTook, TimeUnit.NANOSECONDS);
-            esqlExecutionInfo.swapCluster(
-                computeClusterAlias,
-                (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v)
-                    // for now ESQL doesn't return partial results, so set status to SUCCESSFUL
-                    .setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)
-                    .setTook(tookOnCluster)
-                    .build()
-            );
-        }
-    }
-
-    /**
-     * Use this method when no execution metadata needs to be added to {@link EsqlExecutionInfo}
-     */
-    ActionListener<ComputeResponse> acquireCompute() {
-        return acquireCompute(null);
-    }
-
     @Override
     public void close() {
         refs.close();

+ 128 - 54
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java

@@ -12,14 +12,17 @@ import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.RunOnce;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.compute.operator.Driver;
+import org.elasticsearch.compute.operator.DriverProfile;
 import org.elasticsearch.compute.operator.DriverTaskRunner;
 import org.elasticsearch.compute.operator.exchange.ExchangeService;
 import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler;
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.core.Releasables;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.query.SearchExecutionContext;
 import org.elasticsearch.logging.LogManager;
@@ -52,7 +55,9 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Supplier;
 
 import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME;
@@ -63,6 +68,7 @@ import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_
 public class ComputeService {
     public static final String DATA_ACTION_NAME = EsqlQueryAction.NAME + "/data";
     public static final String CLUSTER_ACTION_NAME = EsqlQueryAction.NAME + "/cluster";
+    private static final String LOCAL_CLUSTER = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY;
 
     private static final Logger LOGGER = LogManager.getLogger(ComputeService.class);
     private final SearchService searchService;
@@ -137,6 +143,7 @@ public class ComputeService {
         Map<String, OriginalIndices> clusterToConcreteIndices = transportService.getRemoteClusterService()
             .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new));
         QueryPragmas queryPragmas = configuration.pragmas();
+        Runnable cancelQueryOnFailure = cancelQueryOnFailure(rootTask);
         if (dataNodePlan == null) {
             if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0) == false) {
                 String error = "expected no concrete indices without data node plan; got " + clusterToConcreteIndices;
@@ -146,20 +153,21 @@ public class ComputeService {
             }
             var computeContext = new ComputeContext(
                 newChildSession(sessionId),
-                RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
+                LOCAL_CLUSTER,
                 List.of(),
                 configuration,
                 foldContext,
                 null,
                 null
             );
-            String local = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY;
             updateShardCountForCoordinatorOnlyQuery(execInfo);
-            try (var computeListener = ComputeListener.create(local, transportService, rootTask, execInfo, listener.map(r -> {
-                updateExecutionInfoAfterCoordinatorOnlyQuery(execInfo);
-                return new Result(physicalPlan.output(), collectedPages, r.getProfiles(), execInfo);
-            }))) {
-                runCompute(rootTask, computeContext, coordinatorPlan, computeListener.acquireCompute(local));
+            try (
+                var computeListener = new ComputeListener(transportService.getThreadPool(), cancelQueryOnFailure, listener.map(profiles -> {
+                    updateExecutionInfoAfterCoordinatorOnlyQuery(execInfo);
+                    return new Result(physicalPlan.output(), collectedPages, profiles, execInfo);
+                }))
+            ) {
+                runCompute(rootTask, computeContext, coordinatorPlan, computeListener.acquireCompute());
                 return;
             }
         } else {
@@ -172,22 +180,18 @@ public class ComputeService {
         }
         Map<String, OriginalIndices> clusterToOriginalIndices = transportService.getRemoteClusterService()
             .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan));
-        var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-        var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-        String local = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY;
+        var localOriginalIndices = clusterToOriginalIndices.remove(LOCAL_CLUSTER);
+        var localConcreteIndices = clusterToConcreteIndices.remove(LOCAL_CLUSTER);
         /*
          * Grab the output attributes here, so we can pass them to
          * the listener without holding on to a reference to the
          * entire plan.
          */
         List<Attribute> outputAttributes = physicalPlan.output();
-        try (
-            // this is the top level ComputeListener called once at the end (e.g., once all clusters have finished for a CCS)
-            var computeListener = ComputeListener.create(local, transportService, rootTask, execInfo, listener.map(r -> {
-                execInfo.markEndQuery();  // TODO: revisit this time recording model as part of INLINESTATS improvements
-                return new Result(outputAttributes, collectedPages, r.getProfiles(), execInfo);
-            }))
-        ) {
+        try (var computeListener = new ComputeListener(transportService.getThreadPool(), cancelQueryOnFailure, listener.map(profiles -> {
+            execInfo.markEndQuery();  // TODO: revisit this time recording model as part of INLINESTATS improvements
+            return new Result(outputAttributes, collectedPages, profiles, execInfo);
+        }))) {
             var exchangeSource = new ExchangeSourceHandler(
                 queryPragmas.exchangeBufferSize(),
                 transportService.getThreadPool().executor(ThreadPool.Names.SEARCH),
@@ -195,50 +199,114 @@ public class ComputeService {
             );
             try (Releasable ignored = exchangeSource.addEmptySink()) {
                 // run compute on the coordinator
-                runCompute(
-                    rootTask,
-                    new ComputeContext(
-                        sessionId,
-                        RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
-                        List.of(),
-                        configuration,
-                        foldContext,
-                        exchangeSource,
-                        null
-                    ),
-                    coordinatorPlan,
-                    computeListener.acquireCompute(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)
-                );
-                // starts computes on data nodes on the main cluster
-                if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) {
-                    dataNodeComputeHandler.startComputeOnDataNodes(
+                final AtomicReference<ComputeResponse> localResponse = new AtomicReference<>(new ComputeResponse(List.of()));
+                try (
+                    var localListener = new ComputeListener(
+                        transportService.getThreadPool(),
+                        cancelQueryOnFailure,
+                        computeListener.acquireCompute().delegateFailure((l, profiles) -> {
+                            if (execInfo.isCrossClusterSearch() && execInfo.clusterAliases().contains(LOCAL_CLUSTER)) {
+                                var tookTime = TimeValue.timeValueNanos(System.nanoTime() - execInfo.getRelativeStartNanos());
+                                var r = localResponse.get();
+                                var merged = new ComputeResponse(
+                                    profiles,
+                                    tookTime,
+                                    r.totalShards,
+                                    r.successfulShards,
+                                    r.skippedShards,
+                                    r.failedShards
+                                );
+                                updateExecutionInfo(execInfo, LOCAL_CLUSTER, merged);
+                            }
+                            l.onResponse(profiles);
+                        })
+                    )
+                ) {
+                    runCompute(
+                        rootTask,
+                        new ComputeContext(sessionId, LOCAL_CLUSTER, List.of(), configuration, foldContext, exchangeSource, null),
+                        coordinatorPlan,
+                        localListener.acquireCompute()
+                    );
+                    // starts computes on data nodes on the main cluster
+                    if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) {
+                        dataNodeComputeHandler.startComputeOnDataNodes(
+                            sessionId,
+                            LOCAL_CLUSTER,
+                            rootTask,
+                            configuration,
+                            dataNodePlan,
+                            Set.of(localConcreteIndices.indices()),
+                            localOriginalIndices,
+                            exchangeSource,
+                            cancelQueryOnFailure,
+                            localListener.acquireCompute().map(r -> {
+                                localResponse.set(r);
+                                return r.getProfiles();
+                            })
+                        );
+                    }
+                }
+                // starts computes on remote clusters
+                final var remoteClusters = clusterComputeHandler.getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices);
+                for (ClusterComputeHandler.RemoteCluster cluster : remoteClusters) {
+                    clusterComputeHandler.startComputeOnRemoteCluster(
                         sessionId,
-                        RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
                         rootTask,
                         configuration,
                         dataNodePlan,
-                        Set.of(localConcreteIndices.indices()),
-                        localOriginalIndices,
                         exchangeSource,
-                        execInfo,
-                        computeListener
+                        cluster,
+                        cancelQueryOnFailure,
+                        computeListener.acquireCompute().map(r -> {
+                            updateExecutionInfo(execInfo, cluster.clusterAlias(), r);
+                            return r.getProfiles();
+                        })
                     );
                 }
-                // starts computes on remote clusters
-                final var remoteClusters = clusterComputeHandler.getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices);
-                clusterComputeHandler.startComputeOnRemoteClusters(
-                    sessionId,
-                    rootTask,
-                    configuration,
-                    dataNodePlan,
-                    exchangeSource,
-                    remoteClusters,
-                    computeListener
-                );
             }
         }
     }
 
+    private void updateExecutionInfo(EsqlExecutionInfo executionInfo, String clusterAlias, ComputeResponse resp) {
+        TimeValue tookOnCluster;
+        if (resp.getTook() != null) {
+            TimeValue remoteExecutionTime = resp.getTook();
+            final long planningTime;
+            if (clusterAlias.equals(LOCAL_CLUSTER)) {
+                planningTime = 0L;
+            } else {
+                planningTime = executionInfo.planningTookTime().nanos();
+            }
+            tookOnCluster = new TimeValue(planningTime + remoteExecutionTime.nanos(), TimeUnit.NANOSECONDS);
+            executionInfo.swapCluster(
+                clusterAlias,
+                (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v)
+                    // for now ESQL doesn't return partial results, so set status to SUCCESSFUL
+                    .setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)
+                    .setTook(tookOnCluster)
+                    .setTotalShards(resp.getTotalShards())
+                    .setSuccessfulShards(resp.getSuccessfulShards())
+                    .setSkippedShards(resp.getSkippedShards())
+                    .setFailedShards(resp.getFailedShards())
+                    .build()
+            );
+        } else {
+            // if the cluster is an older version and does not send back took time, then calculate it here on the coordinator
+            // and leave shard info unset, so it is not shown in the CCS metadata section of the JSON response
+            long remoteTook = System.nanoTime() - executionInfo.getRelativeStartNanos();
+            tookOnCluster = new TimeValue(remoteTook, TimeUnit.NANOSECONDS);
+            executionInfo.swapCluster(
+                clusterAlias,
+                (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v)
+                    // for now ESQL doesn't return partial results, so set status to SUCCESSFUL
+                    .setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)
+                    .setTook(tookOnCluster)
+                    .build()
+            );
+        }
+    }
+
     // For queries like: FROM logs* | LIMIT 0 (including cross-cluster LIMIT 0 queries)
     private static void updateShardCountForCoordinatorOnlyQuery(EsqlExecutionInfo execInfo) {
         if (execInfo.isCrossClusterSearch()) {
@@ -272,7 +340,7 @@ public class ComputeService {
         }
     }
 
-    void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, ActionListener<ComputeResponse> listener) {
+    void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, ActionListener<List<DriverProfile>> listener) {
         listener = ActionListener.runBefore(listener, () -> Releasables.close(context.searchContexts()));
         List<EsPhysicalOperationProviders.ShardContext> contexts = new ArrayList<>(context.searchContexts().size());
         for (int i = 0; i < context.searchContexts().size(); i++) {
@@ -328,10 +396,9 @@ public class ComputeService {
         }
         ActionListener<Void> listenerCollectingStatus = listener.map(ignored -> {
             if (context.configuration().profile()) {
-                return new ComputeResponse(drivers.stream().map(Driver::profile).toList());
+                return drivers.stream().map(Driver::profile).toList();
             } else {
-                final ComputeResponse response = new ComputeResponse(List.of());
-                return response;
+                return List.of();
             }
         });
         listenerCollectingStatus = ActionListener.releaseAfter(listenerCollectingStatus, () -> Releasables.close(drivers));
@@ -357,4 +424,11 @@ public class ComputeService {
     String newChildSession(String session) {
         return session + "/" + childSessionIdGenerator.incrementAndGet();
     }
+
+    Runnable cancelQueryOnFailure(CancellableTask task) {
+        return new RunOnce(() -> {
+            LOGGER.debug("cancelling ESQL task {} on failure", task);
+            transportService.getTaskManager().cancelTaskAndDescendants(task, "cancelled on failure", false, ActionListener.noop());
+        });
+    }
 }

Some files were not shown because too many files changed in this diff