فهرست منبع

Merge pull request ESQL-1207 from elastic/main

🤖 ESQL: Merge upstream
elasticsearchmachine 2 سال پیش
والد
کامیت
06004e2e9d
56 فایل تغییر یافته به همراه 1278 افزوده شده و 406 حذف شده
  1. 5 0
      docs/changelog/96177.yaml
  2. 5 0
      docs/changelog/96279.yaml
  3. 6 0
      docs/changelog/96394.yaml
  4. 5 0
      docs/changelog/96406.yaml
  5. 6 0
      docs/changelog/96421.yaml
  6. 3 2
      docs/reference/mapping/types/dense-vector.asciidoc
  7. 13 0
      docs/reference/rest-api/usage.asciidoc
  8. 1 1
      libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java
  9. 9 0
      libs/x-content/src/test/java/org/elasticsearch/xcontent/ParsedMediaTypeTests.java
  10. 8 5
      modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java
  11. 1 1
      modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldTypeTests.java
  12. 23 4
      modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/20_null_value.yml
  13. 1 0
      qa/mixed-cluster/build.gradle
  14. 72 0
      qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/action/support/tasks/RestListTasksCancellationIT.java
  15. 2 1
      server/src/main/java/org/elasticsearch/TransportVersion.java
  16. 7 0
      server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
  17. 10 2
      server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java
  18. 109 190
      server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
  19. 11 11
      server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java
  20. 5 0
      server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
  21. 14 1
      server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
  22. 1 1
      server/src/main/java/org/elasticsearch/rest/RestRequest.java
  23. 4 1
      server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java
  24. 0 13
      server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
  25. 2 3
      server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
  26. 18 0
      server/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java
  27. 0 12
      server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
  28. 0 12
      server/src/main/java/org/elasticsearch/search/internal/SearchContext.java
  29. 2 2
      server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
  30. 0 15
      server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java
  31. 152 0
      server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
  32. 3 31
      server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java
  33. 8 39
      server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java
  34. 11 0
      server/src/test/java/org/elasticsearch/rest/RestRequestTests.java
  35. 0 25
      server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
  36. 13 9
      test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java
  37. 0 13
      test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
  38. 5 4
      x-pack/docs/build.gradle
  39. 9 4
      x-pack/plugin/build.gradle
  40. 206 0
      x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportActionIT.java
  41. 13 2
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
  42. 2 0
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
  43. 5 0
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
  44. 78 0
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportAction.java
  45. 3 0
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java
  46. 152 0
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataLifecycleFeatureSetUsage.java
  47. 1 1
      x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-executables.json
  48. 86 0
      x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataLifecycleFeatureSetUsageTests.java
  49. 29 0
      x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml
  50. 1 0
      x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
  51. 2 0
      x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
  52. 3 0
      x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
  53. 69 0
      x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java
  54. 1 1
      x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java
  55. 81 0
      x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml
  56. 2 0
      x-pack/qa/mixed-tier-cluster/build.gradle

+ 5 - 0
docs/changelog/96177.yaml

@@ -0,0 +1,5 @@
+pr: 96177
+summary: Adding `data_lifecycle` to the _xpack/usage API
+area: DLM
+type: enhancement
+issues: []

+ 5 - 0
docs/changelog/96279.yaml

@@ -0,0 +1,5 @@
+pr: 96279
+summary: Improve cancellability in `TransportTasksAction`
+area: Task Management
+type: bug
+issues: []

+ 6 - 0
docs/changelog/96394.yaml

@@ -0,0 +1,6 @@
+pr: 96394
+summary: Allow unsigned long field to use decay functions
+area: Mapping
+type: enhancement
+issues:
+ - 89603

+ 5 - 0
docs/changelog/96406.yaml

@@ -0,0 +1,5 @@
+pr: 96406
+summary: Fix tchar pattern in `RestRequest`
+area: Infra/REST API
+type: bug
+issues: []

+ 6 - 0
docs/changelog/96421.yaml

@@ -0,0 +1,6 @@
+pr: 96421
+summary: Promptly fail recovery from snapshot
+area: Recovery
+type: bug
+issues:
+ - 95525

+ 3 - 2
docs/reference/mapping/types/dense-vector.asciidoc

@@ -115,8 +115,9 @@ integer values between -128 to 127, inclusive for both indexing and searching.
 Number of vector dimensions. Can't exceed `1024` for indexed vectors
 (`"index": true`), or `2048` for non-indexed vectors.
 
-experimental::[]
-Number of dimensions for indexed vectors can be extended up to `2048`.
++
+experimental:[]
+The number of dimensions for indexed vectors can be extended up to `2048`.
 
 `index`::
 (Optional, Boolean)

+ 13 - 0
docs/reference/rest-api/usage.asciidoc

@@ -355,6 +355,19 @@ GET /_xpack/usage
     "data_streams" : 0,
     "indices_count" : 0
   },
+  "data_lifecycle" : {
+    "available": true,
+    "enabled": true,
+    "lifecycle": {
+        "count": 0,
+        "default_rollover_used": true,
+        "retention": {
+            "minimum_millis": 0,
+            "maximum_millis": 0,
+            "average_millis": 0.0
+        }
+    }
+  },
   "data_tiers" : {
     "available" : true,
     "enabled" : true,

+ 1 - 1
libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java

@@ -27,7 +27,7 @@ public class ParsedMediaType {
     private final String subType;
     private final Map<String, String> parameters;
     // tchar pattern as defined by RFC7230 section 3.2.6
-    private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+");
+    private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+");
 
     private ParsedMediaType(String originalHeaderValue, String type, String subType, Map<String, String> parameters) {
         this.originalHeaderValue = originalHeaderValue;

+ 9 - 0
libs/x-content/src/test/java/org/elasticsearch/xcontent/ParsedMediaTypeTests.java

@@ -11,6 +11,7 @@ package org.elasticsearch.xcontent;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 import static org.hamcrest.Matchers.equalTo;
@@ -108,6 +109,14 @@ public class ParsedMediaTypeTests extends ESTestCase {
         assertEquals(Collections.emptyMap(), parsedMediaType.getParameters());
     }
 
+    public void testMalformedMediaType() {
+        List<String> headers = List.of("a/b[", "a/b]", "a/b\\");
+        for (String header : headers) {
+            IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ParsedMediaType.parseMediaType(header));
+            assertThat(e.getMessage(), equalTo("invalid media-type [" + header + "]"));
+        }
+    }
+
     public void testMalformedParameters() {
         String mediaType = "application/foo";
         IllegalArgumentException exception = expectThrows(

+ 8 - 5
modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java

@@ -30,8 +30,10 @@ import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser.Token;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Set;
 
 /**
  * A {@link FieldMapper} that exposes Lucene's {@link FeatureField}.
@@ -152,12 +154,13 @@ public class RankFeatureFieldMapper extends FieldMapper {
             if (format != null) {
                 throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats.");
             }
-            return new SourceValueFetcher(name(), context) {
+            return sourceValueFetcher(context.isSourceEnabled() ? context.sourcePath(name()) : Collections.emptySet());
+        }
+
+        private SourceValueFetcher sourceValueFetcher(Set<String> sourcePaths) {
+            return new SourceValueFetcher(sourcePaths, nullValue) {
                 @Override
-                protected Float parseSourceValue(Object value) {
-                    if (value.equals("")) {
-                        return nullValue;
-                    }
+                protected Object parseSourceValue(Object value) {
                     return objectToFloat(value);
                 }
             };

+ 1 - 1
modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldTypeTests.java

@@ -30,6 +30,6 @@ public class RankFeatureFieldTypeTests extends FieldTypeTestCase {
 
         assertEquals(List.of(3.14f), fetchSourceValue(mapper, 3.14));
         assertEquals(List.of(42.9f), fetchSourceValue(mapper, "42.9"));
-        assertEquals(List.of(2.0f), fetchSourceValue(mapper, ""));
+        assertEquals(List.of(2.0f), fetchSourceValue(mapper, null));
     }
 }

+ 23 - 4
modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/20_null_value.yml

@@ -1,12 +1,12 @@
 ---
-"Non positive null_vallue":
+"Non positive null_value":
 
   - skip:
       version: " - 8.8.99"
       reason: "null_value parameter was added in 8.9.0"
 
   - do:
-      catch: bad_request
+      catch: /\[null_value\] must be a positive normal float for field of type \[rank_feature\]/
       indices.create:
         index: test2
         body:
@@ -16,7 +16,7 @@
             properties:
               pagerank:
                 type: rank_feature
-                null_vallue: -3
+                null_value: -3
 
 ---
 "Search rank_feature with and without null_value":
@@ -35,7 +35,7 @@
             properties:
               pagerank:
                 type: rank_feature
-                null_value: 15
+                null_value: 100
               url_length:
                 type: rank_feature
 
@@ -55,9 +55,19 @@
           pagerank: null
           url_length: null
 
+  # can't index a field value equal to an empty string
+  - do:
+      catch: /failed to parse field \[pagerank\] of type \[rank_feature\] in document/
+      index:
+        index: test1
+        id: "wrong_document1"
+        body:
+          pagerank: ""
+
   - do:
       indices.refresh: {}
 
+  # docs with null values are absent in search results
   - do:
       search:
         index: test1
@@ -72,6 +82,7 @@
   - match:
       hits.hits.0._id: "1"
 
+  # docs with null values are present in search results
   - do:
       search:
         index: test1
@@ -79,6 +90,9 @@
           query:
             rank_feature:
               field: pagerank
+          fields:
+            - field: 'pagerank'
+            - field: 'url_length'
 
   - match:
       hits.total.value: 2
@@ -88,3 +102,8 @@
 
   - match:
       hits.hits.1._id: "1"
+
+  - match: { hits.hits.0._source.pagerank: null }
+  - match: { hits.hits.0.fields.pagerank.0: 100 }
+  - match: { hits.hits.0._source.url_length: null }
+  - is_false: hits.hits.0.fields.url_length

+ 1 - 0
qa/mixed-cluster/build.gradle

@@ -38,6 +38,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
       setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}"
       setting 'xpack.security.enabled', 'false'
       requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0")
+      requiresFeature 'es.dlm_feature_flag_enabled', Version.fromString("8.8.0")
     }
 
     tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) {

+ 72 - 0
qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/action/support/tasks/RestListTasksCancellationIT.java

@@ -0,0 +1,72 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.support.tasks;
+
+import org.apache.http.client.methods.HttpGet;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.Cancellable;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.http.HttpSmokeTestCase;
+import org.elasticsearch.tasks.TaskManager;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener;
+import static org.elasticsearch.test.TaskAssertions.awaitTaskWithPrefix;
+
+public class RestListTasksCancellationIT extends HttpSmokeTestCase {
+
+    public void testListTasksCancellation() throws Exception {
+        final Request clusterStateRequest = new Request(HttpGet.METHOD_NAME, "/_cluster/state");
+        clusterStateRequest.addParameter("wait_for_metadata_version", Long.toString(Long.MAX_VALUE));
+        clusterStateRequest.addParameter("wait_for_timeout", "1h");
+
+        final PlainActionFuture<Response> clusterStateFuture = new PlainActionFuture<>();
+        final Cancellable clusterStateCancellable = getRestClient().performRequestAsync(
+            clusterStateRequest,
+            wrapAsRestResponseListener(clusterStateFuture)
+        );
+
+        awaitTaskWithPrefix(ClusterStateAction.NAME);
+
+        final Request tasksRequest = new Request(HttpGet.METHOD_NAME, "/_tasks");
+        tasksRequest.addParameter("actions", ClusterStateAction.NAME);
+        tasksRequest.addParameter("wait_for_completion", Boolean.toString(true));
+        tasksRequest.addParameter("timeout", "1h");
+
+        final PlainActionFuture<Response> tasksFuture = new PlainActionFuture<>();
+        final Cancellable tasksCancellable = getRestClient().performRequestAsync(tasksRequest, wrapAsRestResponseListener(tasksFuture));
+
+        awaitTaskWithPrefix(ListTasksAction.NAME + "[n]");
+
+        tasksCancellable.cancel();
+
+        final var taskManagers = new ArrayList<TaskManager>(internalCluster().getNodeNames().length);
+        for (final var transportService : internalCluster().getInstances(TransportService.class)) {
+            taskManagers.add(transportService.getTaskManager());
+        }
+        assertBusy(
+            () -> assertFalse(
+                taskManagers.stream()
+                    .flatMap(taskManager -> taskManager.getCancellableTasks().values().stream())
+                    .anyMatch(t -> t.getAction().startsWith(ListTasksAction.NAME))
+            )
+        );
+
+        expectThrows(CancellationException.class, () -> tasksFuture.actionGet(10, TimeUnit.SECONDS));
+        clusterStateCancellable.cancel();
+    }
+
+}

+ 2 - 1
server/src/main/java/org/elasticsearch/TransportVersion.java

@@ -126,12 +126,13 @@ public record TransportVersion(int id) implements Comparable<TransportVersion> {
     public static final TransportVersion V_8_500_003 = registerTransportVersion(8_500_003, "30adbe0c-8614-40dd-81b5-44e9c657bb77");
     public static final TransportVersion V_8_500_004 = registerTransportVersion(8_500_004, "6a00db6a-fd66-42a9-97ea-f6cc53169110");
     public static final TransportVersion V_8_500_005 = registerTransportVersion(8_500_005, "65370d2a-d936-4383-a2e0-8403f708129b");
+    public static final TransportVersion V_8_500_006 = registerTransportVersion(8_500_006, "7BB5621A-80AC-425F-BA88-75543C442F23");
 
     /**
      * Reference to the most recent transport version.
      * This should be the transport version with the highest id.
      */
-    public static final TransportVersion CURRENT = V_8_500_005;
+    public static final TransportVersion CURRENT = V_8_500_006;
 
     /**
      * Reference to the earliest compatible transport version to this version of the codebase.

+ 7 - 0
server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java

@@ -14,9 +14,12 @@ import org.elasticsearch.action.support.tasks.BaseTasksRequest;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
 
 import java.io.IOException;
+import java.util.Map;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 import static org.elasticsearch.common.regex.Regex.simpleMatch;
@@ -119,4 +122,8 @@ public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
         return this;
     }
 
+    @Override
+    public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+        return new CancellableTask(id, type, action, "", parentTaskId, headers);
+    }
 }

+ 10 - 2
server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java

@@ -24,6 +24,7 @@ import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.RemovedTaskListener;
 import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskCancelledException;
 import org.elasticsearch.tasks.TaskInfo;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -76,7 +77,13 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
     }
 
     @Override
-    protected void processTasks(ListTasksRequest request, ActionListener<List<Task>> nodeOperation) {
+    protected void doExecute(Task task, ListTasksRequest request, ActionListener<ListTasksResponse> listener) {
+        assert task instanceof CancellableTask;
+        super.doExecute(task, request, listener);
+    }
+
+    @Override
+    protected void processTasks(CancellableTask nodeTask, ListTasksRequest request, ActionListener<List<Task>> nodeOperation) {
         if (request.getWaitForCompletion()) {
             final ListenableActionFuture<List<Task>> future = new ListenableActionFuture<>();
             final List<Task> processedTasks = new ArrayList<>();
@@ -137,8 +144,9 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
                 threadPool,
                 ThreadPool.Names.SAME
             );
+            nodeTask.addListener(() -> future.onFailure(new TaskCancelledException("task cancelled")));
         } else {
-            super.processTasks(request, nodeOperation);
+            super.processTasks(nodeTask, request, nodeOperation);
         }
     }
 }

+ 109 - 190
server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java

@@ -10,40 +10,36 @@ package org.elasticsearch.action.support.tasks;
 
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.NoSuchNodeException;
 import org.elasticsearch.action.TaskOperationFailure;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.CancellableFanOut;
 import org.elasticsearch.action.support.ChannelActionListener;
 import org.elasticsearch.action.support.HandledTransportAction;
-import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.core.Tuple;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.transport.TransportChannel;
-import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestHandler;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportResponse;
-import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReferenceArray;
-
-import static java.util.Collections.emptyList;
 
 /**
  * The base class for transport actions that are interacting with currently running tasks.
@@ -85,67 +81,113 @@ public abstract class TransportTasksAction<
 
     @Override
     protected void doExecute(Task task, TasksRequest request, ActionListener<TasksResponse> listener) {
-        new AsyncAction(task, request, listener).start();
-    }
+        final var discoveryNodes = clusterService.state().nodes();
+        final String[] nodeIds = resolveNodes(request, discoveryNodes);
+
+        new CancellableFanOut<String, NodeTasksResponse, TasksResponse>() {
+            final ArrayList<TaskResponse> taskResponses = new ArrayList<>();
+            final ArrayList<TaskOperationFailure> taskOperationFailures = new ArrayList<>();
+            final ArrayList<FailedNodeException> failedNodeExceptions = new ArrayList<>();
+            final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.getTimeout());
+
+            @Override
+            protected void sendItemRequest(String nodeId, ActionListener<NodeTasksResponse> listener) {
+                final var discoveryNode = discoveryNodes.get(nodeId);
+                if (discoveryNode == null) {
+                    listener.onFailure(new NoSuchNodeException(nodeId));
+                    return;
+                }
+
+                transportService.sendChildRequest(
+                    discoveryNode,
+                    transportNodeAction,
+                    new NodeTaskRequest(request),
+                    task,
+                    transportRequestOptions,
+                    new ActionListenerResponseHandler<>(listener, nodeResponseReader)
+                );
+            }
+
+            @Override
+            protected void onItemResponse(String nodeId, NodeTasksResponse nodeTasksResponse) {
+                addAllSynchronized(taskResponses, nodeTasksResponse.results);
+                addAllSynchronized(taskOperationFailures, nodeTasksResponse.exceptions);
+            }
+
+            @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
+            private static <T> void addAllSynchronized(List<T> allResults, Collection<T> response) {
+                if (response.isEmpty() == false) {
+                    synchronized (allResults) {
+                        allResults.addAll(response);
+                    }
+                }
+            }
+
+            @Override
+            protected void onItemFailure(String nodeId, Exception e) {
+                logger.debug(() -> Strings.format("failed to execute on node [{}]", nodeId), e);
+                synchronized (failedNodeExceptions) {
+                    failedNodeExceptions.add(new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", e));
+                }
+            }
+
+            @Override
+            protected TasksResponse onCompletion() {
+                // ref releases all happen-before here so no need to be synchronized
+                return newResponse(request, taskResponses, taskOperationFailures, failedNodeExceptions);
+            }
 
-    private void nodeOperation(CancellableTask task, NodeTaskRequest nodeTaskRequest, ActionListener<NodeTasksResponse> listener) {
-        TasksRequest request = nodeTaskRequest.tasksRequest;
-        processTasks(request, ActionListener.wrap(tasks -> nodeOperation(task, listener, request, tasks), listener::onFailure));
+            @Override
+            public String toString() {
+                return actionName;
+            }
+        }.run(task, Iterators.forArray(nodeIds), listener);
     }
 
+    // not an inline method reference to avoid capturing CancellableFanOut.this.
+    private final Writeable.Reader<NodeTasksResponse> nodeResponseReader = NodeTasksResponse::new;
+
     private void nodeOperation(
-        CancellableTask task,
+        CancellableTask nodeTask,
         ActionListener<NodeTasksResponse> listener,
         TasksRequest request,
-        List<OperationTask> tasks
+        List<OperationTask> operationTasks
     ) {
-        if (tasks.isEmpty()) {
-            listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), emptyList(), emptyList()));
-            return;
-        }
-        AtomicArray<Tuple<TaskResponse, Exception>> responses = new AtomicArray<>(tasks.size());
-        final AtomicInteger counter = new AtomicInteger(tasks.size());
-        for (int i = 0; i < tasks.size(); i++) {
-            final int taskIndex = i;
-            ActionListener<TaskResponse> taskListener = new ActionListener<TaskResponse>() {
-                @Override
-                public void onResponse(TaskResponse response) {
-                    responses.setOnce(taskIndex, response == null ? null : new Tuple<>(response, null));
-                    respondIfFinished();
-                }
+        new CancellableFanOut<OperationTask, TaskResponse, NodeTasksResponse>() {
 
-                @Override
-                public void onFailure(Exception e) {
-                    responses.setOnce(taskIndex, new Tuple<>(null, e));
-                    respondIfFinished();
+            final ArrayList<TaskResponse> results = new ArrayList<>(operationTasks.size());
+            final ArrayList<TaskOperationFailure> exceptions = new ArrayList<>();
+
+            @Override
+            protected void sendItemRequest(OperationTask operationTask, ActionListener<TaskResponse> listener) {
+                ActionListener.run(listener, l -> taskOperation(nodeTask, request, operationTask, l));
+            }
+
+            @Override
+            protected void onItemResponse(OperationTask operationTask, TaskResponse taskResponse) {
+                synchronized (results) {
+                    results.add(taskResponse);
                 }
+            }
 
-                private void respondIfFinished() {
-                    if (counter.decrementAndGet() != 0) {
-                        return;
-                    }
-                    List<TaskResponse> results = new ArrayList<>();
-                    List<TaskOperationFailure> exceptions = new ArrayList<>();
-                    for (Tuple<TaskResponse, Exception> response : responses.asList()) {
-                        if (response.v1() == null) {
-                            assert response.v2() != null;
-                            exceptions.add(
-                                new TaskOperationFailure(clusterService.localNode().getId(), tasks.get(taskIndex).getId(), response.v2())
-                            );
-                        } else {
-                            assert response.v2() == null;
-                            results.add(response.v1());
-                        }
-                    }
-                    listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions));
+            @Override
+            protected void onItemFailure(OperationTask operationTask, Exception e) {
+                synchronized (exceptions) {
+                    exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), operationTask.getId(), e));
                 }
-            };
-            try {
-                taskOperation(task, request, tasks.get(taskIndex), taskListener);
-            } catch (Exception e) {
-                taskListener.onFailure(e);
             }
-        }
+
+            @Override
+            protected NodeTasksResponse onCompletion() {
+                // ref releases all happen-before here so no need to be synchronized
+                return new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions);
+            }
+
+            @Override
+            public String toString() {
+                return transportNodeAction;
+            }
+        }.run(nodeTask, operationTasks.iterator(), listener);
     }
 
     protected String[] resolveNodes(TasksRequest request, DiscoveryNodes discoveryNodes) {
@@ -156,7 +198,7 @@ public abstract class TransportTasksAction<
         }
     }
 
-    protected void processTasks(TasksRequest request, ActionListener<List<OperationTask>> nodeOperation) {
+    protected void processTasks(CancellableTask nodeTask, TasksRequest request, ActionListener<List<OperationTask>> nodeOperation) {
         nodeOperation.onResponse(processTasks(request));
     }
 
@@ -192,28 +234,6 @@ public abstract class TransportTasksAction<
         List<FailedNodeException> failedNodeExceptions
     );
 
-    @SuppressWarnings("unchecked")
-    protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray<?> responses) {
-        List<TaskResponse> tasks = new ArrayList<>();
-        List<FailedNodeException> failedNodeExceptions = new ArrayList<>();
-        List<TaskOperationFailure> taskOperationFailures = new ArrayList<>();
-        for (int i = 0; i < responses.length(); i++) {
-            Object response = responses.get(i);
-            if (response instanceof FailedNodeException) {
-                failedNodeExceptions.add((FailedNodeException) response);
-            } else {
-                NodeTasksResponse tasksResponse = (NodeTasksResponse) response;
-                if (tasksResponse.results != null) {
-                    tasks.addAll(tasksResponse.results);
-                }
-                if (tasksResponse.exceptions != null) {
-                    taskOperationFailures.addAll(tasksResponse.exceptions);
-                }
-            }
-        }
-        return newResponse(request, tasks, taskOperationFailures, failedNodeExceptions);
-    }
-
     /**
      * Perform the required operation on the task. It is OK start an asynchronous operation or to throw an exception but not both.
      * @param actionTask The related transport action task. Can be used to create a task ID to handle upstream transport cancellations.
@@ -228,120 +248,19 @@ public abstract class TransportTasksAction<
         ActionListener<TaskResponse> listener
     );
 
-    private class AsyncAction {
-
-        private final TasksRequest request;
-        private final String[] nodesIds;
-        private final DiscoveryNode[] nodes;
-        private final ActionListener<TasksResponse> listener;
-        private final AtomicReferenceArray<Object> responses;
-        private final AtomicInteger counter = new AtomicInteger();
-        private final Task task;
-
-        private AsyncAction(Task task, TasksRequest request, ActionListener<TasksResponse> listener) {
-            this.task = task;
-            this.request = request;
-            this.listener = listener;
-            final DiscoveryNodes discoveryNodes = clusterService.state().nodes();
-            this.nodesIds = resolveNodes(request, discoveryNodes);
-            Map<String, DiscoveryNode> nodes = discoveryNodes.getNodes();
-            this.nodes = new DiscoveryNode[nodesIds.length];
-            for (int i = 0; i < this.nodesIds.length; i++) {
-                this.nodes[i] = nodes.get(this.nodesIds[i]);
-            }
-            this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
-        }
-
-        private void start() {
-            if (nodesIds.length == 0) {
-                // nothing to do
-                try {
-                    listener.onResponse(newResponse(request, responses));
-                } catch (Exception e) {
-                    logger.debug("failed to generate empty response", e);
-                    listener.onFailure(e);
-                }
-            } else {
-                final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.getTimeout());
-                for (int i = 0; i < nodesIds.length; i++) {
-                    final String nodeId = nodesIds[i];
-                    final int idx = i;
-                    final DiscoveryNode node = nodes[i];
-                    try {
-                        if (node == null) {
-                            onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
-                        } else {
-                            NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
-                            nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
-                            transportService.sendRequest(
-                                node,
-                                transportNodeAction,
-                                nodeRequest,
-                                transportRequestOptions,
-                                new TransportResponseHandler<NodeTasksResponse>() {
-                                    @Override
-                                    public NodeTasksResponse read(StreamInput in) throws IOException {
-                                        return new NodeTasksResponse(in);
-                                    }
-
-                                    @Override
-                                    public void handleResponse(NodeTasksResponse response) {
-                                        onOperation(idx, response);
-                                    }
-
-                                    @Override
-                                    public void handleException(TransportException exp) {
-                                        onFailure(idx, node.getId(), exp);
-                                    }
-                                }
-                            );
-                        }
-                    } catch (Exception e) {
-                        onFailure(idx, nodeId, e);
-                    }
-                }
-            }
-        }
-
-        private void onOperation(int idx, NodeTasksResponse nodeResponse) {
-            responses.set(idx, nodeResponse);
-            if (counter.incrementAndGet() == responses.length()) {
-                finishHim();
-            }
-        }
-
-        private void onFailure(int idx, String nodeId, Throwable t) {
-            logger.debug(() -> "failed to execute on node [" + nodeId + "]", t);
-
-            responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
-
-            if (counter.incrementAndGet() == responses.length()) {
-                finishHim();
-            }
-        }
-
-        private void finishHim() {
-            if ((task instanceof CancellableTask t) && t.notifyIfCancelled(listener)) {
-                return;
-            }
-            TasksResponse finalResponse;
-            try {
-                finalResponse = newResponse(request, responses);
-            } catch (Exception e) {
-                logger.debug("failed to combine responses from nodes", e);
-                listener.onFailure(e);
-                return;
-            }
-            listener.onResponse(finalResponse);
-        }
-    }
-
     class NodeTransportHandler implements TransportRequestHandler<NodeTaskRequest> {
 
         @Override
         public void messageReceived(final NodeTaskRequest request, final TransportChannel channel, Task task) throws Exception {
             assert task instanceof CancellableTask;
-            nodeOperation((CancellableTask) task, request, new ChannelActionListener<>(channel));
+            TasksRequest tasksRequest = request.tasksRequest;
+            processTasks(
+                (CancellableTask) task,
+                tasksRequest,
+                new ChannelActionListener<NodeTasksResponse>(channel).delegateFailure(
+                    (l, tasks) -> nodeOperation((CancellableTask) task, l, tasksRequest, tasks)
+                )
+            );
         }
     }
 

+ 11 - 11
server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java

@@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.fielddata.FieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.fielddata.MultiGeoPointValues;
@@ -35,7 +36,6 @@ import org.elasticsearch.index.fielddata.SortingNumericDoubleValues;
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.index.mapper.GeoPointFieldMapper.GeoPointFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.NumberFieldMapper;
 import org.elasticsearch.index.query.SearchExecutionContext;
 import org.elasticsearch.search.MultiValueMode;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
@@ -217,15 +217,8 @@ public abstract class DecayFunctionBuilder<DFB extends DecayFunctionBuilder<DFB>
             return parseDateVariable(parser, context, fieldType, mode);
         } else if (fieldType instanceof GeoPointFieldType) {
             return parseGeoVariable(parser, context, fieldType, mode);
-        } else if (fieldType instanceof NumberFieldMapper.NumberFieldType) {
-            return parseNumberVariable(parser, context, fieldType, mode);
         } else {
-            throw new ParsingException(
-                parser.getTokenLocation(),
-                "field [{}] is of type [{}], but only numeric types are supported.",
-                fieldName,
-                fieldType
-            );
+            return parseNumberVariable(parser, context, fieldType, mode);
         }
     }
 
@@ -267,8 +260,15 @@ public abstract class DecayFunctionBuilder<DFB extends DecayFunctionBuilder<DFB>
                 DecayFunctionBuilder.ORIGIN
             );
         }
-        IndexNumericFieldData numericFieldData = context.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH);
-        return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData, mode);
+
+        IndexFieldData<?> indexFieldData = context.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH);
+        if (indexFieldData instanceof IndexNumericFieldData numericFieldData) {
+            return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData, mode);
+        } else {
+            throw new IllegalArgumentException(
+                "field [" + fieldName + "] is of type [" + fieldType + "], but only numeric types are supported."
+            );
+        }
     }
 
     private AbstractDistanceScoreFunction parseGeoVariable(

+ 5 - 0
server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java

@@ -356,6 +356,11 @@ public class PeerRecoveryTargetService implements IndexEventListener {
         return recoverySettings.tryAcquireSnapshotDownloadPermits();
     }
 
+    // Visible for testing
+    public int ongoingRecoveryCount() {
+        return onGoingRecoveries.size();
+    }
+
     /**
      * Prepare the start recovery request.
      *

+ 14 - 1
server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java

@@ -42,6 +42,7 @@ import org.elasticsearch.index.store.StoreFileMetadata;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.repositories.IndexId;
 
+import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.Path;
@@ -586,7 +587,19 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
         ) {
             StoreFileMetadata metadata = fileInfo.metadata();
             int readSnapshotFileBufferSize = snapshotFilesProvider.getReadSnapshotFileBufferSizeForRepo(repository);
-            multiFileWriter.writeFile(metadata, readSnapshotFileBufferSize, inputStream);
+            multiFileWriter.writeFile(metadata, readSnapshotFileBufferSize, new FilterInputStream(inputStream) {
+                @Override
+                public int read() throws IOException {
+                    cancellableThreads.checkForCancel();
+                    return super.read();
+                }
+
+                @Override
+                public int read(byte[] b, int off, int len) throws IOException {
+                    cancellableThreads.checkForCancel();
+                    return super.read(b, off, len);
+                }
+            });
             listener.onResponse(null);
         } catch (Exception e) {
             logger.debug(() -> format("Unable to recover snapshot file %s from repository %s", fileInfo, repository), e);

+ 1 - 1
server/src/main/java/org/elasticsearch/rest/RestRequest.java

@@ -48,7 +48,7 @@ import static org.elasticsearch.core.TimeValue.parseTimeValue;
 public class RestRequest implements ToXContent.Params {
 
     // tchar pattern as defined by RFC7230 section 3.2.6
-    private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+");
+    private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+");
 
     private static final AtomicLong requestIdGenerator = new AtomicLong();
 

+ 4 - 1
server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java

@@ -18,6 +18,7 @@ import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestCancellableNodeClient;
 import org.elasticsearch.rest.action.RestChunkedToXContentListener;
 import org.elasticsearch.tasks.TaskId;
 
@@ -49,7 +50,9 @@ public class RestListTasksAction extends BaseRestHandler {
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final ListTasksRequest listTasksRequest = generateListTasksRequest(request);
         final String groupBy = request.param("group_by", "nodes");
-        return channel -> client.admin().cluster().listTasks(listTasksRequest, listTasksResponseListener(nodesInCluster, groupBy, channel));
+        return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
+            .cluster()
+            .listTasks(listTasksRequest, listTasksResponseListener(nodesInCluster, groupBy, channel));
     }
 
     public static ListTasksRequest generateListTasksRequest(RestRequest request) {

+ 0 - 13
server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java

@@ -11,8 +11,6 @@ package org.elasticsearch.search;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.CollectorManager;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
@@ -129,7 +127,6 @@ final class DefaultSearchContext extends SearchContext {
     private Profilers profilers;
 
     private final Map<String, SearchExtBuilder> searchExtBuilders = new HashMap<>();
-    private CollectorManager<Collector, Void> aggCollectorManager;
     private final SearchExecutionContext searchExecutionContext;
     private final FetchPhase fetchPhase;
 
@@ -763,16 +760,6 @@ final class DefaultSearchContext extends SearchContext {
         return relativeTimeSupplier.getAsLong();
     }
 
-    @Override
-    public CollectorManager<Collector, Void> getAggsCollectorManager() {
-        return aggCollectorManager;
-    }
-
-    @Override
-    public void registerAggsCollectorManager(CollectorManager<Collector, Void> collectorManager) {
-        this.aggCollectorManager = collectorManager;
-    }
-
     @Override
     public SearchExecutionContext getSearchExecutionContext() {
         return searchExecutionContext;

+ 2 - 3
server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java

@@ -56,9 +56,9 @@ public class AggregationPhase {
         }
         if (context.getProfilers() != null) {
             InternalProfileCollector profileCollector = new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION);
-            context.registerAggsCollectorManager(new InternalProfileCollectorManager(profileCollector));
+            context.aggregations().registerAggsCollectorManager(new InternalProfileCollectorManager(profileCollector));
         } else {
-            context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector));
+            context.aggregations().registerAggsCollectorManager(new SingleThreadCollectorManager(collector));
         }
     }
 
@@ -110,6 +110,5 @@ public class AggregationPhase {
 
         // disable aggregations so that they don't run on next pages in case of scrolling
         context.aggregations(null);
-        context.registerAggsCollectorManager(null);
     }
 }

+ 18 - 0
server/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java

@@ -7,6 +7,9 @@
  */
 package org.elasticsearch.search.aggregations;
 
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+
 /**
  * The aggregation context that is part of the search context.
  */
@@ -14,6 +17,7 @@ public class SearchContextAggregations {
 
     private final AggregatorFactories factories;
     private Aggregator[] aggregators;
+    private CollectorManager<Collector, Void> aggCollectorManager;
 
     /**
      * Creates a new aggregation context with the parsed aggregator factories
@@ -38,4 +42,18 @@ public class SearchContextAggregations {
     public void aggregators(Aggregator[] aggregators) {
         this.aggregators = aggregators;
     }
+
+    /**
+     * Registers the collector to be run for the aggregations phase
+     */
+    public void registerAggsCollectorManager(CollectorManager<Collector, Void> aggCollectorManager) {
+        this.aggCollectorManager = aggCollectorManager;
+    }
+
+    /**
+     * Returns the collector to be run for the aggregations phase
+     */
+    public CollectorManager<Collector, Void> getAggsCollectorManager() {
+        return aggCollectorManager;
+    }
 }

+ 0 - 12
server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java

@@ -8,8 +8,6 @@
 
 package org.elasticsearch.search.internal;
 
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.CollectorManager;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TotalHits;
@@ -440,16 +438,6 @@ public abstract class FilteredSearchContext extends SearchContext {
         return in.getProfilers();
     }
 
-    @Override
-    public CollectorManager<Collector, Void> getAggsCollectorManager() {
-        return in.getAggsCollectorManager();
-    }
-
-    @Override
-    public void registerAggsCollectorManager(CollectorManager<Collector, Void> collectorManager) {
-        in.registerAggsCollectorManager(collectorManager);
-    }
-
     @Override
     public SearchExecutionContext getSearchExecutionContext() {
         return in.getSearchExecutionContext();

+ 0 - 12
server/src/main/java/org/elasticsearch/search/internal/SearchContext.java

@@ -7,8 +7,6 @@
  */
 package org.elasticsearch.search.internal;
 
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.CollectorManager;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TotalHits;
@@ -375,16 +373,6 @@ public abstract class SearchContext implements Releasable {
      */
     public abstract long getRelativeTimeInMillis();
 
-    /**
-     * Registers the collector to be run for the aggregations phase
-     */
-    public abstract void registerAggsCollectorManager(CollectorManager<Collector, Void> collectorManager);
-
-    /**
-     * Returns the collector to be run for the aggregations phase
-     */
-    public abstract CollectorManager<Collector, Void> getAggsCollectorManager();
-
     public abstract SearchExecutionContext getSearchExecutionContext();
 
     @Override

+ 2 - 2
server/src/main/java/org/elasticsearch/search/query/QueryPhase.java

@@ -237,9 +237,9 @@ public class QueryPhase {
                     collector
                 );
             }
-            if (searchContext.getAggsCollectorManager() != null) {
+            if (searchContext.aggregations() != null) {
                 final Collector collector = collectorManager.newCollector();
-                final Collector aggsCollector = searchContext.getAggsCollectorManager().newCollector();
+                final Collector aggsCollector = searchContext.aggregations().getAggsCollectorManager().newCollector();
                 collectorManager = wrapWithProfilerCollectorManagerIfNeeded(
                     searchContext.getProfilers(),
                     new SingleThreadCollectorManager(MultiCollector.wrap(collector, aggsCollector)),

+ 0 - 15
server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java

@@ -8,8 +8,6 @@
 
 package org.elasticsearch.search.rank;
 
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.CollectorManager;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TotalHits;
@@ -204,14 +202,6 @@ public class RankSearchContext extends SearchContext {
         return parent.getRelativeTimeInMillis();
     }
 
-    /**
-     * Aggregations are run as a separate query, so do not add any aggregations collectors.
-     */
-    @Override
-    public CollectorManager<Collector, Void> getAggsCollectorManager() {
-        return null;
-    }
-
     /* ---- ALL METHODS ARE UNSUPPORTED BEYOND HERE ---- */
 
     @Override
@@ -544,11 +534,6 @@ public class RankSearchContext extends SearchContext {
         throw new UnsupportedOperationException();
     }
 
-    @Override
-    public void registerAggsCollectorManager(CollectorManager<Collector, Void> collectorManager) {
-        throw new UnsupportedOperationException();
-    }
-
     @Override
     public SearchExecutionContext getSearchExecutionContext() {
         throw new UnsupportedOperationException();

+ 152 - 0
server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java

@@ -7,6 +7,7 @@
  */
 package org.elasticsearch.action.admin.cluster.node.tasks;
 
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.FailedNodeException;
@@ -40,6 +41,7 @@ import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskCancelledException;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.test.ReachabilityChecker;
 import org.elasticsearch.test.tasks.MockTaskManager;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportRequest;
@@ -55,9 +57,12 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.AtomicReferenceArray;
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
@@ -68,6 +73,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.nullValue;
 
 public class TransportTasksActionTests extends TaskManagerTestCase {
 
@@ -674,6 +680,152 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
         assertEquals(0, responses.failureCount());
     }
 
+    public void testTaskResponsesDiscardedOnCancellation() throws Exception {
+        setupTestNodes(Settings.EMPTY);
+        connectNodes(testNodes);
+        CountDownLatch blockedActionLatch = new CountDownLatch(1);
+        ActionFuture<NodesResponse> future = startBlockingTestNodesAction(blockedActionLatch);
+
+        final var taskResponseListeners = new LinkedBlockingQueue<ActionListener<TestTaskResponse>>();
+        final var taskResponseListenersCountDown = new CountDownLatch(2); // test action plus the list[n] action
+
+        final TestTasksAction tasksAction = new TestTasksAction(
+            "internal:testTasksAction",
+            testNodes[0].clusterService,
+            testNodes[0].transportService
+        ) {
+            @Override
+            protected void taskOperation(
+                CancellableTask actionTask,
+                TestTasksRequest request,
+                Task task,
+                ActionListener<TestTaskResponse> listener
+            ) {
+                taskResponseListeners.add(listener);
+                taskResponseListenersCountDown.countDown();
+            }
+        };
+
+        TestTasksRequest testTasksRequest = new TestTasksRequest();
+        testTasksRequest.setNodes(testNodes[0].getNodeId()); // only local node
+        PlainActionFuture<TestTasksResponse> taskFuture = newFuture();
+        CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager()
+            .registerAndExecute(
+                "direct",
+                tasksAction,
+                testTasksRequest,
+                testNodes[0].transportService.getLocalNodeConnection(),
+                taskFuture
+            );
+        safeAwait(taskResponseListenersCountDown);
+
+        final var reachabilityChecker = new ReachabilityChecker();
+
+        final var listener0 = Objects.requireNonNull(taskResponseListeners.poll());
+        if (randomBoolean()) {
+            listener0.onResponse(reachabilityChecker.register(new TestTaskResponse("status")));
+        } else {
+            listener0.onFailure(reachabilityChecker.register(new ElasticsearchException("simulated")));
+        }
+        reachabilityChecker.checkReachable();
+
+        PlainActionFuture.<Void, RuntimeException>get(
+            fut -> testNodes[0].transportService.getTaskManager().cancelTaskAndDescendants(task, "test", false, fut),
+            10,
+            TimeUnit.SECONDS
+        );
+
+        reachabilityChecker.ensureUnreachable();
+
+        while (true) {
+            final var listener = taskResponseListeners.poll();
+            if (listener == null) {
+                break;
+            }
+            if (randomBoolean()) {
+                listener.onResponse(reachabilityChecker.register(new TestTaskResponse("status")));
+            } else {
+                listener.onFailure(reachabilityChecker.register(new ElasticsearchException("simulated")));
+            }
+            reachabilityChecker.ensureUnreachable();
+        }
+
+        expectThrows(TaskCancelledException.class, taskFuture::actionGet);
+
+        blockedActionLatch.countDown();
+        NodesResponse responses = future.get(10, TimeUnit.SECONDS);
+        assertEquals(0, responses.failureCount());
+    }
+
+    public void testNodeResponsesDiscardedOnCancellation() {
+        setupTestNodes(Settings.EMPTY);
+        connectNodes(testNodes);
+
+        final var taskResponseListeners = new AtomicReferenceArray<ActionListener<TestTaskResponse>>(testNodes.length);
+        final var taskResponseListenersCountDown = new CountDownLatch(testNodes.length); // one list[n] action per node
+        final var tasksActions = new TestTasksAction[testNodes.length];
+        for (int i = 0; i < testNodes.length; i++) {
+            final var nodeIndex = i;
+            tasksActions[i] = new TestTasksAction("internal:testTasksAction", testNodes[i].clusterService, testNodes[i].transportService) {
+                @Override
+                protected void taskOperation(
+                    CancellableTask actionTask,
+                    TestTasksRequest request,
+                    Task task,
+                    ActionListener<TestTaskResponse> listener
+                ) {
+                    assertThat(taskResponseListeners.getAndSet(nodeIndex, ActionListener.notifyOnce(listener)), nullValue());
+                    taskResponseListenersCountDown.countDown();
+                }
+            };
+        }
+
+        TestTasksRequest testTasksRequest = new TestTasksRequest();
+        testTasksRequest.setActions("internal:testTasksAction[n]");
+        PlainActionFuture<TestTasksResponse> taskFuture = newFuture();
+        CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager()
+            .registerAndExecute(
+                "direct",
+                tasksActions[0],
+                testTasksRequest,
+                testNodes[0].transportService.getLocalNodeConnection(),
+                taskFuture
+            );
+        safeAwait(taskResponseListenersCountDown);
+
+        final var reachabilityChecker = new ReachabilityChecker();
+
+        if (randomBoolean()) {
+            // local node does not de/serialize node-level response so retains references to the task-level response
+            if (randomBoolean()) {
+                taskResponseListeners.get(0).onResponse(reachabilityChecker.register(new TestTaskResponse("status")));
+            } else {
+                taskResponseListeners.get(0).onFailure(reachabilityChecker.register(new ElasticsearchException("simulated")));
+            }
+            reachabilityChecker.checkReachable();
+        }
+
+        PlainActionFuture.<Void, RuntimeException>get(
+            fut -> testNodes[0].transportService.getTaskManager().cancelTaskAndDescendants(task, "test", false, fut),
+            10,
+            TimeUnit.SECONDS
+        );
+
+        reachabilityChecker.ensureUnreachable();
+        assertFalse(taskFuture.isDone());
+
+        for (int i = 0; i < testNodes.length; i++) {
+            if (randomBoolean()) {
+                taskResponseListeners.get(i).onResponse(reachabilityChecker.register(new TestTaskResponse("status")));
+            } else {
+                taskResponseListeners.get(i).onFailure(reachabilityChecker.register(new ElasticsearchException("simulated")));
+            }
+            reachabilityChecker.ensureUnreachable();
+        }
+
+        expectThrows(TaskCancelledException.class, taskFuture::actionGet);
+    }
+
     public void testTaskLevelActionFailures() throws Exception {
         setupTestNodes(Settings.EMPTY);
         connectNodes(testNodes);

+ 3 - 31
server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java

@@ -9,11 +9,8 @@
 package org.elasticsearch.cluster.routing.allocation.decider;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ESAllocationTestCase;
-import org.elasticsearch.cluster.EmptyClusterInfoService;
-import org.elasticsearch.cluster.TestShardRoutingRoleStrategies;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
@@ -26,14 +23,9 @@ import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodesHelper;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
-import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.snapshots.EmptySnapshotsInfoService;
-import org.elasticsearch.test.gateway.TestGatewayAllocator;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -63,14 +55,6 @@ public class NodeReplacementAllocationDeciderTests extends ESAllocationTestCase
             new NodeShutdownAllocationDecider()
         )
     );
-    private final AllocationService service = new AllocationService(
-        allocationDeciders,
-        new TestGatewayAllocator(),
-        new BalancedShardsAllocator(Settings.EMPTY),
-        EmptyClusterInfoService.INSTANCE,
-        EmptySnapshotsInfoService.INSTANCE,
-        TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY
-    );
 
     private final String idxName = "test-idx";
     private final String idxUuid = "test-idx-uuid";
@@ -98,11 +82,7 @@ public class NodeReplacementAllocationDeciderTests extends ESAllocationTestCase
     }
 
     public void testCanForceAllocate() {
-        ClusterState state = prepareState(
-            service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()),
-            NODE_A.getId(),
-            NODE_B.getName()
-        );
+        ClusterState state = prepareState(ClusterState.EMPTY_STATE, NODE_A.getId(), NODE_B.getName());
         RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
         RoutingNode routingNode = RoutingNodesHelper.routingNode(NODE_A.getId(), NODE_A, shard);
         allocation.debugDecision(true);
@@ -146,11 +126,7 @@ public class NodeReplacementAllocationDeciderTests extends ESAllocationTestCase
     }
 
     public void testCannotRemainOnReplacedNode() {
-        ClusterState state = prepareState(
-            service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()),
-            NODE_A.getId(),
-            NODE_B.getName()
-        );
+        ClusterState state = prepareState(ClusterState.EMPTY_STATE, NODE_A.getId(), NODE_B.getName());
         RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
         RoutingNode routingNode = RoutingNodesHelper.routingNode(NODE_A.getId(), NODE_A, shard);
         allocation.debugDecision(true);
@@ -176,11 +152,7 @@ public class NodeReplacementAllocationDeciderTests extends ESAllocationTestCase
     }
 
     public void testCanAllocateToNeitherSourceNorTarget() {
-        ClusterState state = prepareState(
-            service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()),
-            NODE_A.getId(),
-            NODE_B.getName()
-        );
+        ClusterState state = prepareState(ClusterState.EMPTY_STATE, NODE_A.getId(), NODE_B.getName());
         RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
         RoutingNode routingNode = RoutingNodesHelper.routingNode(NODE_A.getId(), NODE_A, shard);
         allocation.debugDecision(true);

+ 8 - 39
server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java

@@ -9,11 +9,8 @@
 package org.elasticsearch.cluster.routing.allocation.decider;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ESAllocationTestCase;
-import org.elasticsearch.cluster.EmptyClusterInfoService;
-import org.elasticsearch.cluster.TestShardRoutingRoleStrategies;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
@@ -26,15 +23,10 @@ import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodesHelper;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
-import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.snapshots.EmptySnapshotsInfoService;
-import org.elasticsearch.test.gateway.TestGatewayAllocator;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -58,14 +50,6 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
     private final AllocationDeciders allocationDeciders = new AllocationDeciders(
         Arrays.asList(decider, new SameShardAllocationDecider(clusterSettings), new ReplicaAfterPrimaryActiveAllocationDecider())
     );
-    private final AllocationService service = new AllocationService(
-        allocationDeciders,
-        new TestGatewayAllocator(),
-        new BalancedShardsAllocator(Settings.EMPTY),
-        EmptyClusterInfoService.INSTANCE,
-        EmptySnapshotsInfoService.INSTANCE,
-        TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY
-    );
 
     private final String idxName = "test-idx";
     private final String idxUuid = "test-idx-uuid";
@@ -80,10 +64,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
     );
 
     public void testCanAllocateShardsToRestartingNode() {
-        ClusterState state = prepareState(
-            service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()),
-            SingleNodeShutdownMetadata.Type.RESTART
-        );
+        ClusterState state = prepareState(ClusterState.EMPTY_STATE, SingleNodeShutdownMetadata.Type.RESTART);
         RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
         RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard);
         allocation.debugDecision(true);
@@ -98,7 +79,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
 
     public void testCannotAllocateShardsToRemovingNode() {
         for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) {
-            ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), type);
+            ClusterState state = prepareState(ClusterState.EMPTY_STATE, type);
             RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
             RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard);
             allocation.debugDecision(true);
@@ -110,10 +91,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
     }
 
     public void testShardsCanRemainOnRestartingNode() {
-        ClusterState state = prepareState(
-            service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()),
-            SingleNodeShutdownMetadata.Type.RESTART
-        );
+        ClusterState state = prepareState(ClusterState.EMPTY_STATE, SingleNodeShutdownMetadata.Type.RESTART);
         RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
         RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard);
         allocation.debugDecision(true);
@@ -128,7 +106,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
 
     public void testShardsCannotRemainOnRemovingNode() {
         for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) {
-            ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), type);
+            ClusterState state = prepareState(ClusterState.EMPTY_STATE, type);
             RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
             RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard);
             allocation.debugDecision(true);
@@ -144,10 +122,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
     }
 
     public void testCanAutoExpandToRestartingNode() {
-        ClusterState state = prepareState(
-            service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()),
-            SingleNodeShutdownMetadata.Type.RESTART
-        );
+        ClusterState state = prepareState(ClusterState.EMPTY_STATE, SingleNodeShutdownMetadata.Type.RESTART);
         RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
         allocation.debugDecision(true);
 
@@ -160,9 +135,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
     }
 
     public void testCanAutoExpandToNodeIfNoNodesShuttingDown() {
-        ClusterState state = service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop());
-
-        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
+        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, ClusterState.EMPTY_STATE, null, null, 0);
         allocation.debugDecision(true);
 
         Decision decision = decider.shouldAutoExpandToNode(indexMetadata, DATA_NODE, allocation);
@@ -172,11 +145,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
 
     public void testCanAutoExpandToNodeThatIsNotShuttingDown() {
         for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) {
-            ClusterState state = prepareState(
-                service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()),
-                type,
-                "other-node-id"
-            );
+            ClusterState state = prepareState(ClusterState.EMPTY_STATE, type, "other-node-id");
 
             RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
             allocation.debugDecision(true);
@@ -189,7 +158,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase {
 
     public void testCannotAutoExpandToRemovingNode() {
         for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) {
-            ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), type);
+            ClusterState state = prepareState(ClusterState.EMPTY_STATE, type);
             RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
             allocation.debugDecision(true);
 

+ 11 - 0
server/src/test/java/org/elasticsearch/rest/RestRequestTests.java

@@ -201,6 +201,17 @@ public class RestRequestTests extends ESTestCase {
         assertThat(e.getMessage(), equalTo("Invalid media-type value on headers [Content-Type]"));
     }
 
+    public void testInvalidMediaTypeCharacter() {
+        List<String> headers = List.of("a/b[", "a/b]", "a/b\\");
+        for (String header : headers) {
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> RestRequest.parseContentType(Collections.singletonList(header))
+            );
+            assertThat(e.getMessage(), equalTo("invalid Content-Type header [" + header + "]"));
+        }
+    }
+
     public void testNoContentTypeHeader() {
         RestRequest contentRestRequest = contentRestRequest("", Collections.emptyMap(), Collections.emptyMap());
         assertNull(contentRestRequest.getXContentType());

+ 0 - 25
server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java

@@ -50,7 +50,6 @@ import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.join.BitSetProducer;
@@ -409,14 +408,10 @@ public class QueryPhaseTests extends IndexShardTestCase {
         context.terminateAfter(numDocs);
         {
             context.setSize(10);
-            TotalHitCountCollector collector = new TotalHitCountCollector();
-            context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector));
             QueryPhase.executeQuery(context);
             assertFalse(context.queryResult().terminatedEarly());
             assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10));
-            assertThat(collector.getTotalHits(), equalTo(numDocs));
-            context.registerAggsCollectorManager(null);
         }
 
         context.terminateAfter(1);
@@ -447,21 +442,14 @@ public class QueryPhaseTests extends IndexShardTestCase {
         }
         {
             context.setSize(1);
-            TotalHitCountCollector collector = new TotalHitCountCollector();
-            context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector));
             QueryPhase.executeQuery(context);
             assertTrue(context.queryResult().terminatedEarly());
             assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
             assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO));
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
-            // TotalHitCountCollector counts num docs in the first leaf
-            assertThat(collector.getTotalHits(), equalTo(reader.leaves().get(0).reader().numDocs()));
-            context.registerAggsCollectorManager(null);
         }
         {
             context.setSize(0);
-            TotalHitCountCollector collector = new TotalHitCountCollector();
-            context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector));
             QueryPhase.executeQuery(context);
             assertTrue(context.queryResult().terminatedEarly());
             // TotalHitCountCollector counts num docs in the first leaf
@@ -469,8 +457,6 @@ public class QueryPhaseTests extends IndexShardTestCase {
             assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocsInFirstLeaf));
             assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO));
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
-            assertThat(collector.getTotalHits(), equalTo(numDocsInFirstLeaf));
-            context.registerAggsCollectorManager(null);
         }
 
         // tests with trackTotalHits and terminateAfter
@@ -478,8 +464,6 @@ public class QueryPhaseTests extends IndexShardTestCase {
         context.setSize(0);
         for (int trackTotalHits : new int[] { -1, 3, 76, 100 }) {
             context.trackTotalHitsUpTo(trackTotalHits);
-            TotalHitCountCollector collector = new TotalHitCountCollector();
-            context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector));
             QueryPhase.executeQuery(context);
             assertTrue(context.queryResult().terminatedEarly());
             if (trackTotalHits == -1) {
@@ -493,16 +477,12 @@ public class QueryPhaseTests extends IndexShardTestCase {
                 assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO));
             }
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
-            assertThat(collector.getTotalHits(), equalTo(countDocUpTo.applyAsInt(10)));
-            context.registerAggsCollectorManager(null);
         }
 
         context.terminateAfter(7);
         context.setSize(10);
         for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) {
             context.trackTotalHitsUpTo(trackTotalHits);
-            EarlyTerminatingCollector collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 1, false);
-            context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector));
             QueryPhase.executeQuery(context);
             assertTrue(context.queryResult().terminatedEarly());
             if (trackTotalHits == -1) {
@@ -516,7 +496,6 @@ public class QueryPhaseTests extends IndexShardTestCase {
                 assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO));
             }
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7));
-            context.registerAggsCollectorManager(null);
         }
         reader.close();
         dir.close();
@@ -566,16 +545,12 @@ public class QueryPhaseTests extends IndexShardTestCase {
             assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
             context.parsedPostFilter(null);
 
-            final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
-            context.registerAggsCollectorManager(new SingleThreadCollectorManager(totalHitCountCollector));
             QueryPhase.executeQuery(context);
             assertNull(context.queryResult().terminatedEarly());
             assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
             assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
-            assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs));
-            context.registerAggsCollectorManager(null);
         }
 
         {

+ 13 - 9
test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java

@@ -15,11 +15,12 @@ import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseListener;
 import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.transport.Transport;
 
-import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
-import static org.mockito.Mockito.mock;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 public class ActionTestUtils {
 
@@ -29,10 +30,11 @@ public class ActionTestUtils {
         TransportAction<Request, Response> action,
         Request request
     ) {
-        PlainActionFuture<Response> future = newFuture();
-        Task task = mock(Task.class);
-        action.execute(task, request, future);
-        return future.actionGet();
+        return PlainActionFuture.get(
+            future -> action.execute(request.createTask(1L, "direct", action.actionName, TaskId.EMPTY_TASK_ID, Map.of()), request, future),
+            10,
+            TimeUnit.SECONDS
+        );
     }
 
     public static <Request extends ActionRequest, Response extends ActionResponse> Response executeBlockingWithTask(
@@ -41,9 +43,11 @@ public class ActionTestUtils {
         TransportAction<Request, Response> action,
         Request request
     ) {
-        PlainActionFuture<Response> future = newFuture();
-        taskManager.registerAndExecute("transport", action, request, localConnection, future);
-        return future.actionGet();
+        return PlainActionFuture.get(
+            future -> taskManager.registerAndExecute("transport", action, request, localConnection, future),
+            10,
+            TimeUnit.SECONDS
+        );
     }
 
     /**

+ 0 - 13
test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java

@@ -7,8 +7,6 @@
  */
 package org.elasticsearch.test;
 
-import org.apache.lucene.search.Collector;
-import org.apache.lucene.search.CollectorManager;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TotalHits;
@@ -73,7 +71,6 @@ public class TestSearchContext extends SearchContext {
     boolean trackScores = false;
     int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO;
     RankShardContext rankShardContext;
-    CollectorManager<Collector, Void> aggCollectorManager;
     ContextIndexSearcher searcher;
     int from;
     int size;
@@ -526,16 +523,6 @@ public class TestSearchContext extends SearchContext {
         return null; // no profiling
     }
 
-    @Override
-    public CollectorManager<Collector, Void> getAggsCollectorManager() {
-        return aggCollectorManager;
-    }
-
-    @Override
-    public void registerAggsCollectorManager(CollectorManager<Collector, Void> collector) {
-        this.aggCollectorManager = collector;
-    }
-
     @Override
     public SearchExecutionContext getSearchExecutionContext() {
         return searchExecutionContext;

+ 5 - 4
x-pack/docs/build.gradle

@@ -1,4 +1,3 @@
-import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.internal.info.BuildParams
 
 apply plugin: 'elasticsearch.docs-test'
@@ -30,9 +29,11 @@ restResources {
   }
 }
 
-// TODO: Remove the following when RCS feature is released
-// The get-builtin-privileges doc test does not include the new cluster privilege for RCS
-// So we disable the test if the build is a snapshot where unreleased feature is enabled by default
+// TODO: Remove the following when the following features are released. These tests include new privileges only available under feature flags
+//  which require snapshot builds:
+// * RCS 2.0. cross_cluster_search is only available with untrusted_remote_cluster_feature_flag_registered set
+// * DLM. manage_dlm privilege is only available with dlm_feature_flag_enabled set
+// We disable these tests for snapshot builds to maintain release build coverage.
 tasks.named("yamlRestTest").configure {
   if (BuildParams.isSnapshotBuild()) {
     systemProperty 'tests.rest.blacklist', '*/get-builtin-privileges/*'

+ 9 - 4
x-pack/plugin/build.gradle

@@ -66,9 +66,11 @@ if (BuildParams.isSnapshotBuild() == false) {
   // private key, these tests are blacklisted in non-snapshot test runs
   restTestBlacklist.addAll(['xpack/15_basic/*', 'license/20_put_license/*', 'license/30_enterprise_license/*'])
 
-  // TODO: Remove the following when RCS feature is released
-  // cross_cluster_search privilege is only available when untrusted_remote_cluster_feature_flag_registered is enabled
-  // which requires snapshot build
+  // TODO: Remove the following when the following features are released. These tests include new privileges only available under feature flags
+  //  which require snapshot builds:
+  // * RCS 2.0. cross_cluster_search is only available with untrusted_remote_cluster_feature_flag_registered set
+  // * DLM. manage_dlm privilege is only available with dlm_feature_flag_enabled set
+  // We disable these tests for snapshot builds to maintain release build coverage.
   restTestBlacklist.add('privileges/11_builtin/Test get builtin privileges')
   restTestBlacklist.add('api_key/50_cross_cluster/*')
 }
@@ -95,7 +97,10 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task ->
     "vectors/10_dense_vector_basic/Deprecated function signature",
     "to support it, it would require to almost revert back the #48725 and complicate the code"
   )
-  task.skipTest("vectors/20_dense_vector_special_cases/Indexing of Dense vectors should error when dims don't match defined in the mapping", "Error message has changed")
+  task.skipTest(
+    "vectors/20_dense_vector_special_cases/Indexing of Dense vectors should error when dims don't match defined in the mapping",
+    "Error message has changed"
+  )
   task.skipTest("vectors/30_sparse_vector_basic/Cosine Similarity", "not supported for compatibility")
   task.skipTest("vectors/30_sparse_vector_basic/Deprecated function signature", "not supported for compatibility")
   task.skipTest("vectors/30_sparse_vector_basic/Dot Product", "not supported for compatibility")

+ 206 - 0
x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportActionIT.java

@@ -0,0 +1,206 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.DataStreamAlias;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexMode;
+import org.elasticsearch.plugins.ActionPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.protocol.xpack.XPackUsageRequest;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
+import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xpack.core.XPackClientPlugin;
+import org.junit.After;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import static org.elasticsearch.xpack.core.action.XPackUsageFeatureAction.DATA_LIFECYCLE;
+import static org.hamcrest.Matchers.equalTo;
+
+public class DataLifecycleUsageTransportActionIT extends ESIntegTestCase {
+    /*
+     * The DataLifecycleUsageTransportAction is not exposed in the xpack core plugin, so we have a special test plugin to do this
+     */
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(TestDateLifecycleUsagePlugin.class);
+    }
+
+    @After
+    private void cleanup() throws Exception {
+        updateClusterState(clusterState -> {
+            ClusterState.Builder clusterStateBuilder = new ClusterState.Builder(clusterState);
+            Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata());
+            metadataBuilder.dataStreams(Map.of(), Map.of());
+            clusterStateBuilder.metadata(metadataBuilder);
+            return clusterStateBuilder.build();
+        });
+        updateClusterSettings(Settings.builder().put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), (String) null));
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testAction() throws Exception {
+        assertUsageResults(0, 0, 0, 0.0, true);
+        AtomicLong count = new AtomicLong(0);
+        AtomicLong totalRetentionTimes = new AtomicLong(0);
+        AtomicLong minRetention = new AtomicLong(Long.MAX_VALUE);
+        AtomicLong maxRetention = new AtomicLong(Long.MIN_VALUE);
+        boolean useDefaultRolloverConfig = randomBoolean();
+        if (useDefaultRolloverConfig == false) {
+            updateClusterSettings(Settings.builder().put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), "min_docs=33"));
+        }
+        /*
+         * We now add a number of simulated data streams to the cluster state. Some have lifecycles, some don't. The ones with lifecycles
+         * have varying retention periods. After adding them, we make sure the numbers add up.
+         */
+        updateClusterState(clusterState -> {
+            Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata());
+            Map<String, DataStream> dataStreamMap = new HashMap<>();
+            for (int dataStreamCount = 0; dataStreamCount < randomInt(200); dataStreamCount++) {
+                boolean hasLifecycle = randomBoolean();
+                long retentionMillis;
+                if (hasLifecycle) {
+                    retentionMillis = randomLongBetween(1000, 100000);
+                    count.incrementAndGet();
+                    totalRetentionTimes.addAndGet(retentionMillis);
+                    if (retentionMillis < minRetention.get()) {
+                        minRetention.set(retentionMillis);
+                    }
+                    if (retentionMillis > maxRetention.get()) {
+                        maxRetention.set(retentionMillis);
+                    }
+                } else {
+                    retentionMillis = 0;
+                }
+                List<Index> indices = new ArrayList<>();
+                for (int indicesCount = 0; indicesCount < randomIntBetween(1, 10); indicesCount++) {
+                    Index index = new Index(randomAlphaOfLength(60), randomAlphaOfLength(60));
+                    indices.add(index);
+                }
+                boolean systemDataStream = randomBoolean();
+                DataStream dataStream = new DataStream(
+                    randomAlphaOfLength(50),
+                    indices,
+                    randomLongBetween(0, 1000),
+                    Map.of(),
+                    systemDataStream || randomBoolean(),
+                    randomBoolean(),
+                    systemDataStream,
+                    randomBoolean(),
+                    IndexMode.STANDARD,
+                    hasLifecycle ? new DataLifecycle(retentionMillis) : null
+                );
+                dataStreamMap.put(dataStream.getName(), dataStream);
+            }
+            Map<String, DataStreamAlias> dataStreamAliasesMap = Map.of();
+            metadataBuilder.dataStreams(dataStreamMap, dataStreamAliasesMap);
+            ClusterState.Builder clusterStateBuilder = new ClusterState.Builder(clusterState);
+            clusterStateBuilder.metadata(metadataBuilder);
+            return clusterStateBuilder.build();
+        });
+        int expectedMinimumRetention = minRetention.get() == Long.MAX_VALUE ? 0 : minRetention.intValue();
+        int expectedMaximumRetention = maxRetention.get() == Long.MIN_VALUE ? 0 : maxRetention.intValue();
+        double expectedAverageRetention = count.get() == 0 ? 0.0 : totalRetentionTimes.doubleValue() / count.get();
+        assertUsageResults(
+            count.intValue(),
+            expectedMinimumRetention,
+            expectedMaximumRetention,
+            expectedAverageRetention,
+            useDefaultRolloverConfig
+        );
+    }
+
+    @SuppressWarnings("unchecked")
+    private void assertUsageResults(
+        int count,
+        int minimumRetention,
+        int maximumRetention,
+        double averageRetention,
+        boolean defaultRolloverUsed
+    ) throws Exception {
+        XPackUsageFeatureResponse response = client().execute(DATA_LIFECYCLE, new XPackUsageRequest()).get();
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        builder = response.getUsage().toXContent(builder, ToXContent.EMPTY_PARAMS);
+        Tuple<XContentType, Map<String, Object>> tuple = XContentHelper.convertToMap(
+            BytesReference.bytes(builder),
+            true,
+            XContentType.JSON
+        );
+
+        Map<String, Object> map = tuple.v2();
+        assertThat(map.get("available"), equalTo(true));
+        assertThat(map.get("enabled"), equalTo(true));
+        assertThat(map.get("count"), equalTo(count));
+        assertThat(map.get("default_rollover_used"), equalTo(defaultRolloverUsed));
+        Map<String, Object> retentionMap = (Map<String, Object>) map.get("retention");
+        assertThat(retentionMap.size(), equalTo(3));
+        assertThat(retentionMap.get("minimum_millis"), equalTo(minimumRetention));
+        assertThat(retentionMap.get("maximum_millis"), equalTo(maximumRetention));
+        assertThat(retentionMap.get("average_millis"), equalTo(averageRetention));
+    }
+
+    /*
+     * Updates the cluster state in the internal cluster using the provided function
+     */
+    protected static void updateClusterState(final Function<ClusterState, ClusterState> updater) throws Exception {
+        final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
+        final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
+        clusterService.submitUnbatchedStateUpdateTask("test", new ClusterStateUpdateTask() {
+            @Override
+            public ClusterState execute(ClusterState currentState) {
+                return updater.apply(currentState);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                future.onFailure(e);
+            }
+
+            @Override
+            public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
+                future.onResponse(null);
+            }
+        });
+        future.get();
+    }
+
+    /*
+     * This plugin exposes the DataLifecycleUsageTransportAction.
+     */
+    public static final class TestDateLifecycleUsagePlugin extends XPackClientPlugin {
+        @Override
+        public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+            List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>();
+            actions.add(new ActionPlugin.ActionHandler<>(DATA_LIFECYCLE, DataLifecycleUsageTransportAction.class));
+            return actions;
+        }
+    }
+}

+ 13 - 2
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java

@@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.NamedDiff;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Setting;
@@ -38,6 +39,7 @@ import org.elasticsearch.xpack.core.application.EnterpriseSearchFeatureSetUsage;
 import org.elasticsearch.xpack.core.archive.ArchiveFeatureSetUsage;
 import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction;
 import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
+import org.elasticsearch.xpack.core.datastreams.DataLifecycleFeatureSetUsage;
 import org.elasticsearch.xpack.core.datastreams.DataStreamFeatureSetUsage;
 import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction;
 import org.elasticsearch.xpack.core.enrich.EnrichFeatureSetUsage;
@@ -235,6 +237,8 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStats
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Objects;
+import java.util.stream.Stream;
 
 // TODO: merge this into XPackPlugin
 public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin {
@@ -415,7 +419,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
 
     @Override
     public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
-        return Arrays.asList(
+        return Stream.of(
             // graph
             new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new),
             // logstash
@@ -548,6 +552,13 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
             ),
             // Data Streams
             new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new),
+            DataLifecycle.isEnabled()
+                ? new NamedWriteableRegistry.Entry(
+                    XPackFeatureSet.Usage.class,
+                    XPackField.DATA_LIFECYCLE,
+                    DataLifecycleFeatureSetUsage::new
+                )
+                : null,
             // Data Tiers
             new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new),
             // Archive
@@ -564,7 +575,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
                 XPackField.ENTERPRISE_SEARCH,
                 EnterpriseSearchFeatureSetUsage::new
             )
-        );
+        ).filter(Objects::nonNull).toList();
     }
 
     @Override

+ 2 - 0
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java

@@ -67,6 +67,8 @@ public final class XPackField {
     public static final String SEARCHABLE_SNAPSHOTS = "searchable_snapshots";
     /** Name constant for the data streams feature. */
     public static final String DATA_STREAMS = "data_streams";
+    /** Name constant for the data lifecycle feature. */
+    public static final String DATA_LIFECYCLE = "data_lifecycle";
     /** Name constant for the data tiers feature. */
     public static final String DATA_TIERS = "data_tiers";
     /** Name constant for the aggregate_metric plugin. */

+ 5 - 0
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java

@@ -17,6 +17,7 @@ import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.support.TransportAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -74,6 +75,7 @@ import org.elasticsearch.watcher.ResourceWatcherService;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider;
 import org.elasticsearch.xpack.cluster.routing.allocation.mapper.DataTierFieldMapper;
+import org.elasticsearch.xpack.core.action.DataLifecycleUsageTransportAction;
 import org.elasticsearch.xpack.core.action.DataStreamInfoTransportAction;
 import org.elasticsearch.xpack.core.action.DataStreamUsageTransportAction;
 import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
@@ -355,6 +357,9 @@ public class XPackPlugin extends XPackClientPlugin
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_TIERS, DataTiersUsageTransportAction.class));
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_STREAMS, DataStreamUsageTransportAction.class));
         actions.add(new ActionHandler<>(XPackInfoFeatureAction.DATA_STREAMS, DataStreamInfoTransportAction.class));
+        if (DataLifecycle.isEnabled()) {
+            actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_LIFECYCLE, DataLifecycleUsageTransportAction.class));
+        }
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.HEALTH, HealthApiUsageTransportAction.class));
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.REMOTE_CLUSTERS, RemoteClusterUsageTransportAction.class));
         return actions;

+ 78 - 0
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportAction.java

@@ -0,0 +1,78 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.protocol.xpack.XPackUsageRequest;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.datastreams.DataLifecycleFeatureSetUsage;
+
+import java.util.Collection;
+import java.util.LongSummaryStatistics;
+import java.util.stream.Collectors;
+
+public class DataLifecycleUsageTransportAction extends XPackUsageFeatureTransportAction {
+
+    @Inject
+    public DataLifecycleUsageTransportAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            XPackUsageFeatureAction.DATA_LIFECYCLE.name(),
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            indexNameExpressionResolver
+        );
+    }
+
+    @Override
+    protected void masterOperation(
+        Task task,
+        XPackUsageRequest request,
+        ClusterState state,
+        ActionListener<XPackUsageFeatureResponse> listener
+    ) {
+        final Collection<DataStream> dataStreams = state.metadata().dataStreams().values();
+        LongSummaryStatistics retentionStats = dataStreams.stream()
+            .filter(ds -> ds.getLifecycle() != null)
+            .collect(Collectors.summarizingLong(ds -> ds.getLifecycle().getDataRetention().getMillis()));
+        long dataStreamsWithLifecycles = retentionStats.getCount();
+        long minRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMin();
+        long maxRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMax();
+        double averageRetention = retentionStats.getAverage();
+        RolloverConfiguration rolloverConfiguration = clusterService.getClusterSettings()
+            .get(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING);
+        String rolloverConfigString = rolloverConfiguration.toString();
+        final DataLifecycleFeatureSetUsage.LifecycleStats stats = new DataLifecycleFeatureSetUsage.LifecycleStats(
+            dataStreamsWithLifecycles,
+            minRetention,
+            maxRetention,
+            averageRetention,
+            DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getDefault(null).toString().equals(rolloverConfigString)
+        );
+
+        final DataLifecycleFeatureSetUsage usage = new DataLifecycleFeatureSetUsage(stats);
+        listener.onResponse(new XPackUsageFeatureResponse(usage));
+    }
+}

+ 3 - 0
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java

@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.action;
 
 import org.elasticsearch.action.ActionType;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
 import org.elasticsearch.transport.TcpTransport;
 import org.elasticsearch.xpack.core.XPackField;
 
@@ -46,6 +47,7 @@ public class XPackUsageFeatureAction extends ActionType<XPackUsageFeatureRespons
     public static final XPackUsageFeatureAction ENRICH = new XPackUsageFeatureAction(XPackField.ENRICH);
     public static final XPackUsageFeatureAction SEARCHABLE_SNAPSHOTS = new XPackUsageFeatureAction(XPackField.SEARCHABLE_SNAPSHOTS);
     public static final XPackUsageFeatureAction DATA_STREAMS = new XPackUsageFeatureAction(XPackField.DATA_STREAMS);
+    public static final XPackUsageFeatureAction DATA_LIFECYCLE = new XPackUsageFeatureAction(XPackField.DATA_LIFECYCLE);
     public static final XPackUsageFeatureAction DATA_TIERS = new XPackUsageFeatureAction(XPackField.DATA_TIERS);
     public static final XPackUsageFeatureAction AGGREGATE_METRIC = new XPackUsageFeatureAction(XPackField.AGGREGATE_METRIC);
     public static final XPackUsageFeatureAction ARCHIVE = new XPackUsageFeatureAction(XPackField.ARCHIVE);
@@ -58,6 +60,7 @@ public class XPackUsageFeatureAction extends ActionType<XPackUsageFeatureRespons
         ANALYTICS,
         CCR,
         DATA_STREAMS,
+        DataLifecycle.isEnabled() ? DATA_LIFECYCLE : null,
         DATA_TIERS,
         EQL,
         ESQL,

+ 152 - 0
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataLifecycleFeatureSetUsage.java

@@ -0,0 +1,152 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.datastreams;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xpack.core.XPackFeatureSet;
+import org.elasticsearch.xpack.core.XPackField;
+
+import java.io.IOException;
+import java.util.Objects;
+
+public class DataLifecycleFeatureSetUsage extends XPackFeatureSet.Usage {
+    final LifecycleStats lifecycleStats;
+
+    public DataLifecycleFeatureSetUsage(StreamInput input) throws IOException {
+        super(input);
+        this.lifecycleStats = new LifecycleStats(input);
+    }
+
+    public DataLifecycleFeatureSetUsage(LifecycleStats stats) {
+        super(XPackField.DATA_LIFECYCLE, true, true);
+        this.lifecycleStats = stats;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        lifecycleStats.writeTo(out);
+    }
+
+    @Override
+    public TransportVersion getMinimalSupportedVersion() {
+        return TransportVersion.V_8_500_006;
+    }
+
+    @Override
+    protected void innerXContent(XContentBuilder builder, Params params) throws IOException {
+        super.innerXContent(builder, params);
+        builder.field("count", lifecycleStats.dataStreamsWithLifecyclesCount);
+        builder.field("default_rollover_used", lifecycleStats.defaultRolloverUsed);
+        builder.startObject("retention");
+        builder.field("minimum_millis", lifecycleStats.minRetentionMillis);
+        builder.field("maximum_millis", lifecycleStats.maxRetentionMillis);
+        builder.field("average_millis", lifecycleStats.averageRetentionMillis);
+        builder.endObject();
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+
+    @Override
+    public int hashCode() {
+        return lifecycleStats.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (obj.getClass() != getClass()) {
+            return false;
+        }
+        DataLifecycleFeatureSetUsage other = (DataLifecycleFeatureSetUsage) obj;
+        return Objects.equals(lifecycleStats, other.lifecycleStats);
+    }
+
+    public static class LifecycleStats implements Writeable {
+        final long dataStreamsWithLifecyclesCount;
+        final long minRetentionMillis;
+        final long maxRetentionMillis;
+        final double averageRetentionMillis;
+        final boolean defaultRolloverUsed;
+
+        public LifecycleStats(
+            long dataStreamsWithLifecyclesCount,
+            long minRetention,
+            long maxRetention,
+            double averageRetention,
+            boolean defaultRolloverUsed
+        ) {
+            this.dataStreamsWithLifecyclesCount = dataStreamsWithLifecyclesCount;
+            this.minRetentionMillis = minRetention;
+            this.maxRetentionMillis = maxRetention;
+            this.averageRetentionMillis = averageRetention;
+            this.defaultRolloverUsed = defaultRolloverUsed;
+        }
+
+        public LifecycleStats(StreamInput in) throws IOException {
+            if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_006)) {
+                this.dataStreamsWithLifecyclesCount = in.readVLong();
+                this.minRetentionMillis = in.readVLong();
+                this.maxRetentionMillis = in.readVLong();
+                this.averageRetentionMillis = in.readDouble();
+                this.defaultRolloverUsed = in.readBoolean();
+            } else {
+                this.dataStreamsWithLifecyclesCount = 0;
+                this.minRetentionMillis = 0;
+                this.maxRetentionMillis = 0;
+                this.averageRetentionMillis = 0.0;
+                this.defaultRolloverUsed = false;
+            }
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_006)) {
+                out.writeVLong(dataStreamsWithLifecyclesCount);
+                out.writeVLong(minRetentionMillis);
+                out.writeVLong(maxRetentionMillis);
+                out.writeDouble(averageRetentionMillis);
+                out.writeBoolean(defaultRolloverUsed);
+            }
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(
+                dataStreamsWithLifecyclesCount,
+                minRetentionMillis,
+                maxRetentionMillis,
+                averageRetentionMillis,
+                defaultRolloverUsed
+            );
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj.getClass() != getClass()) {
+                return false;
+            }
+            LifecycleStats other = (LifecycleStats) obj;
+            return dataStreamsWithLifecyclesCount == other.dataStreamsWithLifecyclesCount
+                && minRetentionMillis == other.minRetentionMillis
+                && maxRetentionMillis == other.maxRetentionMillis
+                && averageRetentionMillis == other.averageRetentionMillis
+                && defaultRolloverUsed == other.defaultRolloverUsed;
+        }
+    }
+}

+ 1 - 1
x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-executables.json

@@ -31,7 +31,7 @@
         "Symbolization.next_time": {
           "type": "date",
           "format": "epoch_second",
-          "index": false
+          "index": true
         }
       }
     }

+ 86 - 0
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataLifecycleFeatureSetUsageTests.java

@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.datastreams;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
+
+public class DataLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase<DataLifecycleFeatureSetUsage> {
+
+    @Override
+    protected DataLifecycleFeatureSetUsage createTestInstance() {
+        return new DataLifecycleFeatureSetUsage(
+            new DataLifecycleFeatureSetUsage.LifecycleStats(
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomDouble(),
+                randomBoolean()
+            )
+        );
+    }
+
+    @Override
+    protected DataLifecycleFeatureSetUsage mutateInstance(DataLifecycleFeatureSetUsage instance) {
+        return switch (randomInt(4)) {
+            case 0 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    randomValueOtherThan(instance.lifecycleStats.dataStreamsWithLifecyclesCount, ESTestCase::randomLong),
+                    instance.lifecycleStats.minRetentionMillis,
+                    instance.lifecycleStats.maxRetentionMillis,
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 1 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    randomValueOtherThan(instance.lifecycleStats.minRetentionMillis, ESTestCase::randomLong),
+                    instance.lifecycleStats.maxRetentionMillis,
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 2 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    instance.lifecycleStats.minRetentionMillis,
+                    randomValueOtherThan(instance.lifecycleStats.maxRetentionMillis, ESTestCase::randomLong),
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 3 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    instance.lifecycleStats.minRetentionMillis,
+                    instance.lifecycleStats.maxRetentionMillis,
+                    randomValueOtherThan(instance.lifecycleStats.averageRetentionMillis, ESTestCase::randomDouble),
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 4 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    instance.lifecycleStats.minRetentionMillis,
+                    instance.lifecycleStats.maxRetentionMillis,
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed == false
+                )
+            );
+            default -> throw new RuntimeException("unreachable");
+        };
+    }
+
+    @Override
+    protected Writeable.Reader<DataLifecycleFeatureSetUsage> instanceReader() {
+        return DataLifecycleFeatureSetUsage::new;
+    }
+
+}

+ 29 - 0
x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml

@@ -376,3 +376,32 @@ setup:
               }]
 
   - length: { aggregations.test.buckets: 0 }
+
+---
+"Decay":
+  - skip:
+      features: close_to
+      version: " - 8.8.99"
+      reason: "decay functions not supported for unsigned_long"
+
+  - do:
+      search:
+        index: test1
+        body:
+          size: 10
+          query:
+            function_score:
+              functions: [{
+                "linear": {
+                  "ul": {
+                    "scale": 18000000000000000000.0,
+                    "origin": 12000000000000000000.0
+                  }
+                }
+              }]
+
+  - close_to: { hits.hits.0._score: { value: 0.9228715, error: 0.001 } }
+  - close_to: { hits.hits.1._score: { value: 0.9228715, error: 0.001 } }
+  - close_to: { hits.hits.2._score: { value: 0.8209238, error: 0.001 } }
+  - close_to: { hits.hits.3._score: { value: 0.8209238, error: 0.001 } }
+  - close_to: { hits.hits.4._score: { value: 0.6666667, error: 0.001 } }

+ 1 - 0
x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java

@@ -367,6 +367,7 @@ public class Constants {
         "cluster:monitor/xpack/usage/analytics",
         "cluster:monitor/xpack/usage/archive",
         "cluster:monitor/xpack/usage/ccr",
+        DataLifecycle.isEnabled() ? "cluster:monitor/xpack/usage/data_lifecycle" : null,
         "cluster:monitor/xpack/usage/data_streams",
         "cluster:monitor/xpack/usage/data_tiers",
         "cluster:monitor/xpack/usage/enrich",

+ 2 - 0
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java

@@ -50,6 +50,7 @@ import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.MockLogAppender;
 import org.elasticsearch.test.TransportVersionUtils;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TcpTransport;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequest.Empty;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -2019,6 +2020,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testGetRoleForCrossClusterAccessAuthentication() throws Exception {
+        assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled());
         final FileRolesStore fileRolesStore = mock(FileRolesStore.class);
         doCallRealMethod().when(fileRolesStore).accept(anySet(), anyActionListener());
         final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class);

+ 3 - 0
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java

@@ -683,6 +683,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase {
     }
 
     public void testSendWithCrossClusterAccessHeadersForSystemUserCcrInternalAction() throws Exception {
+        assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled());
         final String action = randomFrom(
             "internal:admin/ccr/restore/session/put",
             "internal:admin/ccr/restore/session/clear",
@@ -698,6 +699,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase {
     }
 
     public void testSendWithCrossClusterAccessHeadersForRegularUserRegularAction() throws Exception {
+        assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled());
         final Authentication authentication = randomValueOtherThanMany(
             authc -> authc.getAuthenticationType() == Authentication.AuthenticationType.INTERNAL,
             () -> AuthenticationTestHelper.builder().build()
@@ -708,6 +710,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase {
     }
 
     public void testSendWithCrossClusterAccessHeadersForRegularUserClusterStateAction() throws Exception {
+        assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled());
         final Authentication authentication = randomValueOtherThanMany(
             authc -> authc.getAuthenticationType() == Authentication.AuthenticationType.INTERNAL,
             () -> AuthenticationTestHelper.builder().build()

+ 69 - 0
x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java

@@ -662,6 +662,75 @@ public class SnapshotBasedIndexRecoveryIT extends AbstractSnapshotIntegTestCase
         }
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96427")
+    public void testCancelledRecoveryAbortsDownloadPromptly() throws Exception {
+        updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), "1");
+
+        try {
+            internalCluster().ensureAtLeastNumDataNodes(2);
+
+            String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+            createIndex(
+                indexName,
+                Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
+            );
+            ensureGreen(indexName);
+
+            int numDocs = randomIntBetween(1, 1000);
+            indexDocs(indexName, numDocs, numDocs);
+
+            String repoName = "repo";
+            createRepo(repoName, TestRepositoryPlugin.FILTER_TYPE);
+            createSnapshot(repoName, "snap", Collections.singletonList(indexName));
+
+            final AtomicBoolean isCancelled = new AtomicBoolean();
+            final CountDownLatch readFromBlobCalledLatch = new CountDownLatch(1);
+            final CountDownLatch readFromBlobRespondLatch = new CountDownLatch(1);
+
+            FilterFsRepository.wrapReadBlobMethod((blobName, stream) -> {
+                if (blobName.startsWith("__")) {
+                    return new FilterInputStream(stream) {
+                        @Override
+                        public int read() throws IOException {
+                            beforeRead();
+                            return super.read();
+                        }
+
+                        @Override
+                        public int read(byte[] b, int off, int len) throws IOException {
+                            beforeRead();
+                            return super.read(b, off, len);
+                        }
+
+                        private void beforeRead() {
+                            assertFalse(isCancelled.get()); // should have no further reads once the index is deleted
+                            readFromBlobCalledLatch.countDown();
+                            safeAwait(readFromBlobRespondLatch);
+                        }
+                    };
+                } else {
+                    return stream;
+                }
+            });
+
+            updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1), indexName);
+            safeAwait(readFromBlobCalledLatch);
+
+            assertAcked(client().admin().indices().prepareDelete(indexName).get());
+            // cancellation flag is set when applying the cluster state that deletes the index, so no further waiting is necessary
+            isCancelled.set(true);
+            readFromBlobRespondLatch.countDown();
+
+            assertThat(indexExists(indexName), is(equalTo(false)));
+            assertBusy(
+                () -> internalCluster().getInstances(PeerRecoveryTargetService.class)
+                    .forEach(peerRecoveryTargetService -> assertEquals(0, peerRecoveryTargetService.ongoingRecoveryCount()))
+            );
+        } finally {
+            updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), null);
+        }
+    }
+
     public void testRecoveryAfterRestoreUsesSnapshots() throws Exception {
         String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
         createIndex(

+ 1 - 1
x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java

@@ -19,7 +19,7 @@ import java.util.stream.Collectors;
  */
 class ParsedMediaType {
     // tchar pattern as defined by RFC7230 section 3.2.6
-    private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+");
+    private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+");
 
     private final String originalHeaderValue;
     private final String type;

+ 81 - 0
x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml

@@ -0,0 +1,81 @@
+---
+"Test DLM usage stats":
+  - skip:
+      version: "- 8.8.99"
+      reason: "the dlm stats were only added to the usage api in 8.9"
+
+  - do:
+      xpack.usage: {}
+
+  - match: { data_lifecycle.available: true }
+  - match: { data_lifecycle.enabled: true }
+  - match: { data_lifecycle.count: 0 }
+  - match: { data_lifecycle.default_rollover_used: true }
+  - match: { data_lifecycle.retention.minimum_millis: 0 }
+  - match: { data_lifecycle.retention.maximum_millis: 0 }
+  - match: { data_lifecycle.retention.average_millis: 0 }
+
+  - do:
+      indices.put_index_template:
+        name: my-template-1
+        body:
+          index_patterns: [foo-*]
+          template:
+            mappings:
+              properties:
+                '@timestamp':
+                  type: date
+            lifecycle:
+              data_retention: 10d
+          data_stream: {}
+
+  - do:
+      indices.create_data_stream:
+        name: foo-foobar
+  - is_true: acknowledged
+
+  - do:
+      indices.put_index_template:
+        name: my-template-2
+        body:
+          index_patterns: [bar-*]
+          template:
+            mappings:
+              properties:
+                '@timestamp':
+                  type: date
+            lifecycle:
+              data_retention: 5d
+          data_stream: {}
+
+  - do:
+      indices.create_data_stream:
+        name: bar-foobar
+  - is_true: acknowledged
+
+  - do:
+      xpack.usage: {}
+
+  - match: { data_lifecycle.available: true }
+  - match: { data_lifecycle.enabled: true }
+  - match: { data_lifecycle.count: 2 }
+  - match: { data_lifecycle.default_rollover_used: true }
+  - match: { data_lifecycle.retention.minimum_millis: 432000000 }
+  - match: { data_lifecycle.retention.maximum_millis: 864000000 }
+  - match: { data_lifecycle.retention.average_millis: 648000000 }
+
+  - do:
+      indices.delete_data_stream:
+        name: foo-foobar
+  - is_true: acknowledged
+
+  - do:
+      xpack.usage: {}
+
+  - match: { data_lifecycle.available: true }
+  - match: { data_lifecycle.enabled: true }
+  - match: { data_lifecycle.count: 1 }
+  - match: { data_lifecycle.default_rollover_used: true }
+  - match: { data_lifecycle.retention.minimum_millis: 432000000 }
+  - match: { data_lifecycle.retention.maximum_millis: 432000000 }
+  - match: { data_lifecycle.retention.average_millis: 432000000 }

+ 2 - 0
x-pack/qa/mixed-tier-cluster/build.gradle

@@ -1,6 +1,7 @@
 apply plugin: 'elasticsearch.legacy-java-rest-test'
 apply plugin: 'elasticsearch.bwc-test'
 
+import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.VersionProperties
 import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask
@@ -29,6 +30,7 @@ BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") &&
       nodes."${baseName}-1".setting 'node.roles', '["data_content", "data_hot"]'
     }
     nodes."${baseName}-2".setting 'node.roles', '["master"]'
+    requiresFeature 'es.dlm_feature_flag_enabled', Version.fromString("8.8.0")
   }
 
   tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) {