Introduce a _lifecycle/explain API for data stream backing indices (#94621)

This adds an {index}/_lifecycle/explain API to retrieve information
about an index's status within its lifecycle.

The response looks like so:
```
"indices" : {
    ".ds-metrics-foo-2023.03.22-000001" : {
      "index" : ".ds-metrics-foo-2023.03.22-000001",
      "managed_by_dlm" : true,
      "index_creation_date_millis" : 1679475563571,
      "time_since_index_creation" : "843ms",
      "rollover_date_millis" : 1679475564293,
      "time_since_rollover" : "121ms",
      "lifecycle" : { },
      "generation_time" : "121ms"
    },
    ".ds-metrics-foo-2023.03.22-000002" : {
      "index" : ".ds-metrics-foo-2023.03.22-000002",
      "managed_by_dlm" : true,
      "index_creation_date_millis" : 1679475564351,
      "time_since_index_creation" : "63ms",
      "lifecycle" : { }
    }
  }
}
```
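
A minimal sketch of invoking the new action from Java, mirroring the usage in the internal-cluster test included in this commit (`explain` is an illustrative helper name):

```
import org.elasticsearch.action.dlm.ExplainIndexDataLifecycle;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.dlm.action.ExplainDataLifecycleAction;

// Build the request for one or more backing indices and execute it through the
// internal Client; getIndices() returns one explanation per resolved index.
static void explain(Client client, String... backingIndices) {
    ExplainDataLifecycleAction.Request request = new ExplainDataLifecycleAction.Request(backingIndices);
    ExplainDataLifecycleAction.Response response = client.execute(ExplainDataLifecycleAction.INSTANCE, request).actionGet();
    for (ExplainIndexDataLifecycle explainIndex : response.getIndices()) {
        // corresponds to the "managed_by_dlm" field in the JSON above
        System.out.println(explainIndex.getIndex() + " managed_by_dlm=" + explainIndex.isManagedByDLM());
    }
}
```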
Andrei Dan · 2 years ago · commit 223385f887
19 changed files with 1617 additions and 2 deletions:

  1. docs/changelog/94621.yaml (+5 -0)
  2. docs/reference/dlm/apis/dlm-api.asciidoc (+13 -0)
  3. docs/reference/dlm/apis/explain-data-lifecycle.asciidoc (+111 -0)
  4. docs/reference/rest-api/index.asciidoc (+3 -0)
  5. modules/dlm/build.gradle (+24 -0)
  6. modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/ExplainDataLifecycleIT.java (+306 -0)
  7. modules/dlm/src/main/java/module-info.java (+3 -0)
  8. modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java (+47 -2)
  9. modules/dlm/src/main/java/org/elasticsearch/dlm/action/ExplainDataLifecycleAction.java (+205 -0)
  10. modules/dlm/src/main/java/org/elasticsearch/dlm/action/TransportExplainDataLifecycleAction.java (+122 -0)
  11. modules/dlm/src/main/java/org/elasticsearch/dlm/rest/RestExplainDataLifecycleAction.java (+55 -0)
  12. modules/dlm/src/test/java/org/elasticsearch/dlm/action/ExplainDataLifecycleResponseTests.java (+218 -0)
  13. modules/dlm/src/yamlRestTest/java/org/elasticsearch/dlm/DlmRestIT.java (+40 -0)
  14. modules/dlm/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_explain_lifecycle.yml (+53 -0)
  15. rest-api-spec/src/main/resources/rest-api-spec/api/dlm.explain_lifecycle.json (+37 -0)
  16. server/src/main/java/module-info.java (+2 -0)
  17. server/src/main/java/org/elasticsearch/action/dlm/ExplainIndexDataLifecycle.java (+228 -0)
  18. server/src/test/java/org/elasticsearch/action/dlm/ExplainIndexDataLifecycleTests.java (+144 -0)
  19. x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java (+1 -0)

+ 5 - 0
docs/changelog/94621.yaml

@@ -0,0 +1,5 @@
+pr: 94621
+summary: Introduce a _lifecycle/explain API for data stream backing indices
+area: DLM
+type: feature
+issues: []

+ 13 - 0
docs/reference/dlm/apis/dlm-api.asciidoc

@@ -0,0 +1,13 @@
+[[data-lifecycle-management-api]]
+== Data Lifecycle Management APIs
+
+You use the following APIs to configure data lifecycle management for data streams
+and to retrieve lifecycle information for their backing indices.
+
+[discrete]
+[[dlm-api-management-endpoint]]
+=== Operation management APIs
+
+* <<dlm-explain-lifecycle,Explain Lifecycle API>>
+
+include::explain-data-lifecycle.asciidoc[]

+ 111 - 0
docs/reference/dlm/apis/explain-data-lifecycle.asciidoc

@@ -0,0 +1,111 @@
+[[dlm-explain-lifecycle]]
+=== Explain Lifecycle API
+++++
+<titleabbrev>Explain Data Lifecycle</titleabbrev>
+++++
+
+experimental::[]
+
+Retrieves the current data lifecycle status for one or more data stream backing indices.
+
+[[dlm-explain-lifecycle-request]]
+==== {api-request-title}
+
+`GET <target>/_lifecycle/explain`
+
+[[dlm-explain-lifecycle-desc]]
+==== {api-description-title}
+
+Retrieves information about an index's current DLM lifecycle state, such as
+the time since index creation, the time since rollover, the lifecycle configuration
+managing the index, or any error that {es} might have encountered during the lifecycle
+execution.
+
+[[dlm-explain-lifecycle-path-params]]
+==== {api-path-parms-title}
+
+`<target>`::
+(Required, string) Comma-separated list of indices for which to retrieve lifecycle information.
+
+[[dlm-explain-lifecycle-query-params]]
+==== {api-query-parms-title}
+
+`include_defaults`::
+  (Optional, Boolean) Includes default configurations related to the lifecycle of the target index.
+  Defaults to `false`.
+
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+
+[[dlm-explain-lifecycle-example]]
+==== {api-examples-title}
+
+The following example retrieves the lifecycle state of the index `.ds-metrics-2023.03.22-000001`:
+
+[source,console]
+--------------------------------------------------
+GET .ds-metrics-2023.03.22-000001/_lifecycle/explain
+--------------------------------------------------
+// TEST[skip:we're not setting up DLM in these tests]
+
+If the index is managed by DLM, `explain` will show the `managed_by_dlm` field
+set to `true`, and the rest of the response will contain information about the
+lifecycle execution status for this index:
+
+[source,console-result]
+--------------------------------------------------
+{
+  "indices": {
+    ".ds-metrics-2023.03.22-000001": {
+      "index" : ".ds-metrics-2023.03.22-000001",
+      "managed_by_dlm" : true,                        <1>
+      "index_creation_date_millis" : 1679475563571,   <2>
+      "time_since_index_creation" : "843ms",          <3>
+      "rollover_date_millis" : 1679475564293,         <4>
+      "time_since_rollover" : "121ms",                <5>
+      "lifecycle" : { },                              <6>
+      "generation_time" : "121ms"                     <7>
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the result is for illustrating purposes only]
+
+<1> Shows if the index is being managed by DLM. If the index is not managed by
+DLM, the other fields will not be shown.
+<2> When the index was created. This timestamp is used to determine when to
+roll over the index.
+<3> The time since the index creation (used for calculating when to roll over
+the index via the `max_age`)
+<4> When the index was rolled over. If the index was not rolled over, this will not be
+shown.
+<5> The time since rollover. If the index was not rolled over, this will not be shown.
+<6> The lifecycle configuration that applies to this index (which is configured on the parent
+data stream)
+<7> The generation time of the index represents the time since the index started progressing
+towards the user-configurable, business-specific parts of the lifecycle (e.g. retention).
+Every index must wait to be rolled over before it can progress to the
+business-specific part of the lifecycle (i.e. the index advances in its lifecycle after it
+stops being the write index of a data stream). If the index has not been rolled over, the
+`generation_time` will not be reported.
+
+The `explain` API will also report any errors related to the lifecycle execution for the target
+index:
+
+[source,console-result]
+--------------------------------------------------
+{
+  "indices": {
+    ".ds-metrics-2023.03.22-000001": {
+      "index" : ".ds-metrics-2023.03.22-000001",
+      "managed_by_dlm" : true,
+      "index_creation_date_millis" : 1679475563571,
+      "time_since_index_creation" : "843ms",
+      "lifecycle" : { },
+      "error": "{\"type\":\"validation_exception\",\"reason\":\"Validation Failed: 1: this action would add [2] shards, but this cluster
+currently has [4]/[3] maximum normal shards open;\"}"        <1>
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the result is for illustrating purposes only]
+
+<1> The target index could not be rolled over due to a limitation in the number of shards
+allowed in the cluster.
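
A note on the duration fields documented above: a sketch of how `time_since_index_creation`, `time_since_rollover`, and `generation_time` are presumably derived from the two stored timestamps (the actual logic lives in `ExplainIndexDataLifecycle`, added under `server/` below); this is an illustration, not the committed implementation:

```
import java.util.function.LongSupplier;

import org.elasticsearch.core.TimeValue;

// Presumed derivation: each duration is "now" minus a stored timestamp, and the
// field is omitted entirely when the timestamp is absent (e.g. no rollover yet).
static TimeValue timeSince(Long timestampMillis, LongSupplier nowSupplier) {
    if (timestampMillis == null) {
        return null; // field is not reported in the explain response
    }
    return TimeValue.timeValueMillis(Math.max(0L, nowSupplier.getAsLong() - timestampMillis));
}
```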

+ 3 - 0
docs/reference/rest-api/index.asciidoc

@@ -19,6 +19,7 @@ not be included yet.
 * <<features-apis,Features APIs>>
 * <<ccr-apis,{ccr-cap} APIs>>
 * <<data-stream-apis,Data stream APIs>>
+* <<data-lifecycle-management-api, Data Lifecycle Management APIs>>
 * <<docs, Document APIs>>
 * <<enrich-apis,Enrich APIs>>
 * <<eql-apis,EQL search APIs>>
@@ -93,3 +94,5 @@ include::{es-repo-dir}/transform/apis/index.asciidoc[]
 include::usage.asciidoc[]
 include::{xes-repo-dir}/rest-api/watcher.asciidoc[]
 include::defs.asciidoc[]
+include::{es-repo-dir}/dlm/apis/dlm-api.asciidoc[]
+

+ 24 - 0
modules/dlm/build.gradle

@@ -1,7 +1,16 @@
+import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.internal.info.BuildParams
 
 apply plugin: 'elasticsearch.internal-es-plugin'
 apply plugin: 'elasticsearch.internal-cluster-test'
+apply plugin: 'elasticsearch.legacy-yaml-rest-test'
+apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test'
+
+restResources {
+  restApi {
+    include  '_common', 'indices', 'index', 'cluster', 'data_stream', 'dlm'
+  }
+}
 
 esplugin {
   name 'dlm'
@@ -15,6 +24,18 @@ dependencies {
 
 addQaCheckDependencies(project)
 
+testClusters.configureEach {
+  module ':modules:reindex'
+  testDistribution = 'DEFAULT'
+  // disable ILM history, since it disturbs tests using _all
+  setting 'indices.lifecycle.history_index_enabled', 'false'
+  setting 'xpack.security.enabled', 'true'
+  setting 'indices.dlm.poll_interval', '1000ms'
+  keystore 'bootstrap.password', 'x-pack-test-password'
+  user username: "x_pack_rest_user", password: "x-pack-test-password"
+  requiresFeature 'es.dlm_feature_flag_enabled', Version.fromString("8.8.0")
+}
+
 if (BuildParams.isSnapshotBuild() == false) {
   tasks.named("test").configure {
     systemProperty 'es.dlm_feature_flag_enabled', 'true'
@@ -22,4 +43,7 @@ if (BuildParams.isSnapshotBuild() == false) {
   tasks.named("internalClusterTest").configure {
     systemProperty 'es.dlm_feature_flag_enabled', 'true'
   }
+  tasks.named("yamlRestTest").configure {
+    systemProperty 'es.dlm_feature_flag_enabled', 'true'
+  }
 }
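
The `systemProperty` lines above gate the DLM code behind a feature flag for non-snapshot builds. A hypothetical sketch of the check the flag presumably drives, assuming `DataLifecycle.isEnabled()` is backed by the `es.dlm_feature_flag_enabled` system property (the real plumbing lives in `DataLifecycle` and is not part of this diff):

```
// Hypothetical sketch: snapshot builds are assumed to enable the flag by
// default, which is why only non-snapshot test tasks set the property above.
private static final boolean DLM_FEATURE_FLAG_ENABLED = Boolean.parseBoolean(
    System.getProperty("es.dlm_feature_flag_enabled", "false")
);

public static boolean isEnabled() {
    return DLM_FEATURE_FLAG_ENABLED;
}
```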

+ 306 - 0
modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/ExplainDataLifecycleIT.java

@@ -0,0 +1,306 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+package org.elasticsearch.dlm;
+
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.rollover.Condition;
+import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
+import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.datastreams.CreateDataStreamAction;
+import org.elasticsearch.action.datastreams.GetDataStreamAction;
+import org.elasticsearch.action.dlm.ExplainIndexDataLifecycle;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.Template;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.datastreams.DataStreamsPlugin;
+import org.elasticsearch.dlm.action.ExplainDataLifecycleAction;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.DateFieldMapper;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.xcontent.XContentType;
+import org.junit.After;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo;
+import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD;
+import static org.elasticsearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.startsWith;
+
+public class ExplainDataLifecycleIT extends ESIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(DataLifecyclePlugin.class, DataStreamsPlugin.class, MockTransportService.TestPlugin.class);
+    }
+
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
+        settings.put(DataLifecycleService.DLM_POLL_INTERVAL, "1s");
+        settings.put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), "min_docs=1,max_docs=1");
+        return settings.build();
+    }
+
+    @After
+    public void cleanup() {
+        // we change SETTING_CLUSTER_MAX_SHARDS_PER_NODE in a test so let's make sure we clean it up even when the test fails
+        updateClusterSettings(Settings.builder().putNull("*"));
+    }
+
+    public void testExplainLifecycle() throws Exception {
+        // empty lifecycle contains the default rollover
+        DataLifecycle lifecycle = new DataLifecycle();
+
+        putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle);
+        String dataStreamName = "metrics-foo";
+        CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
+        client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
+
+        indexDocs(dataStreamName, 1);
+
+        assertBusy(() -> {
+            GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
+            GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest)
+                .actionGet();
+            assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
+            assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName));
+            List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices();
+            assertThat(backingIndices.size(), equalTo(2));
+            String backingIndex = backingIndices.get(0).getName();
+            assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1));
+            String writeIndex = backingIndices.get(1).getName();
+            assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2));
+        });
+
+        {
+            ExplainDataLifecycleAction.Request explainIndicesRequest = new ExplainDataLifecycleAction.Request(
+                new String[] {
+                    DataStream.getDefaultBackingIndexName(dataStreamName, 1),
+                    DataStream.getDefaultBackingIndexName(dataStreamName, 2) }
+            );
+            ExplainDataLifecycleAction.Response response = client().execute(ExplainDataLifecycleAction.INSTANCE, explainIndicesRequest)
+                .actionGet();
+            assertThat(response.getIndices().size(), is(2));
+            // we requested the explain for indices with the default include_defaults=false
+            assertThat(response.getRolloverConditions(), nullValue());
+            for (ExplainIndexDataLifecycle explainIndex : response.getIndices()) {
+                assertThat(explainIndex.isManagedByDLM(), is(true));
+                assertThat(explainIndex.getIndexCreationDate(), notNullValue());
+                assertThat(explainIndex.getLifecycle(), notNullValue());
+                assertThat(explainIndex.getLifecycle().getDataRetention(), nullValue());
+                assertThat(explainIndex.getError(), nullValue());
+
+                if (explainIndex.getIndex().equals(DataStream.getDefaultBackingIndexName(dataStreamName, 1))) {
+                    // first generation index was rolled over
+                    assertThat(explainIndex.getIndex(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 1)));
+                    assertThat(explainIndex.getRolloverDate(), notNullValue());
+                    assertThat(explainIndex.getTimeSinceRollover(System::currentTimeMillis), notNullValue());
+                    assertThat(explainIndex.getGenerationTime(System::currentTimeMillis), notNullValue());
+                } else {
+                    // the write index has not been rolled over yet
+                    assertThat(explainIndex.getGenerationTime(System::currentTimeMillis), nullValue());
+                    assertThat(explainIndex.getIndex(), is(DataStream.getDefaultBackingIndexName(dataStreamName, 2)));
+                    assertThat(explainIndex.getRolloverDate(), nullValue());
+                    assertThat(explainIndex.getTimeSinceRollover(System::currentTimeMillis), nullValue());
+                }
+            }
+        }
+
+        {
+            // let's also explain with include_defaults=true
+            ExplainDataLifecycleAction.Request explainIndicesRequest = new ExplainDataLifecycleAction.Request(
+                new String[] {
+                    DataStream.getDefaultBackingIndexName(dataStreamName, 1),
+                    DataStream.getDefaultBackingIndexName(dataStreamName, 2) },
+                true
+            );
+            ExplainDataLifecycleAction.Response response = client().execute(ExplainDataLifecycleAction.INSTANCE, explainIndicesRequest)
+                .actionGet();
+            assertThat(response.getIndices().size(), is(2));
+            RolloverConditions rolloverConditions = response.getRolloverConditions();
+            assertThat(rolloverConditions, notNullValue());
+            Map<String, Condition<?>> conditions = rolloverConditions.getConditions();
+            assertThat(conditions.size(), is(2));
+            assertThat(conditions.get(RolloverConditions.MAX_DOCS_FIELD.getPreferredName()).value(), is(1L));
+            assertThat(conditions.get(RolloverConditions.MIN_DOCS_FIELD.getPreferredName()).value(), is(1L));
+        }
+    }
+
+    public void testExplainLifecycleForIndicesWithErrors() throws Exception {
+        // empty lifecycle contains the default rollover
+        DataLifecycle lifecycle = new DataLifecycle();
+
+        putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle);
+
+        String dataStreamName = "metrics-foo";
+        CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
+        client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
+
+        indexDocs(dataStreamName, 1);
+
+        // let's allow one rollover to go through
+        assertBusy(() -> {
+            GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
+            GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest)
+                .actionGet();
+            assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
+            assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName));
+            List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices();
+            assertThat(backingIndices.size(), equalTo(2));
+            String backingIndex = backingIndices.get(0).getName();
+            assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1));
+            String writeIndex = backingIndices.get(1).getName();
+            assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2));
+        });
+
+        // prevent new indices from being created (ie. future rollovers)
+        updateClusterSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 1));
+
+        indexDocs(dataStreamName, 1);
+
+        String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 2);
+        assertBusy(() -> {
+            ExplainDataLifecycleAction.Request explainIndicesRequest = new ExplainDataLifecycleAction.Request(
+                new String[] { writeIndexName }
+            );
+            ExplainDataLifecycleAction.Response response = client().execute(ExplainDataLifecycleAction.INSTANCE, explainIndicesRequest)
+                .actionGet();
+            assertThat(response.getIndices().size(), is(1));
+            // we requested the explain for indices with the default include_defaults=false
+            assertThat(response.getRolloverConditions(), nullValue());
+            for (ExplainIndexDataLifecycle explainIndex : response.getIndices()) {
+                assertThat(explainIndex.getIndex(), is(writeIndexName));
+                assertThat(explainIndex.isManagedByDLM(), is(true));
+                assertThat(explainIndex.getIndexCreationDate(), notNullValue());
+                assertThat(explainIndex.getLifecycle(), notNullValue());
+                assertThat(explainIndex.getLifecycle().getDataRetention(), nullValue());
+                assertThat(explainIndex.getRolloverDate(), nullValue());
+                assertThat(explainIndex.getTimeSinceRollover(System::currentTimeMillis), nullValue());
+                // index has not been rolled over yet
+                assertThat(explainIndex.getGenerationTime(System::currentTimeMillis), nullValue());
+
+                assertThat(explainIndex.getError(), containsString("maximum normal shards open"));
+            }
+        });
+
+        // let's reset the cluster max shards per node limit to allow rollover to proceed and check the reported error is null
+        updateClusterSettings(Settings.builder().putNull("*"));
+
+        assertBusy(() -> {
+            ExplainDataLifecycleAction.Request explainIndicesRequest = new ExplainDataLifecycleAction.Request(
+                new String[] { writeIndexName }
+            );
+            ExplainDataLifecycleAction.Response response = client().execute(ExplainDataLifecycleAction.INSTANCE, explainIndicesRequest)
+                .actionGet();
+            assertThat(response.getIndices().size(), is(1));
+            assertThat(response.getIndices().get(0).getError(), is(nullValue()));
+        });
+    }
+
+    public void testExplainDLMForUnmanagedIndices() throws Exception {
+        String dataStreamName = "metrics-foo";
+        putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, null);
+        CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo");
+        client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
+
+        indexDocs(dataStreamName, 4);
+
+        String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1);
+        assertBusy(() -> {
+            ExplainDataLifecycleAction.Request explainIndicesRequest = new ExplainDataLifecycleAction.Request(
+                new String[] { writeIndexName }
+            );
+            ExplainDataLifecycleAction.Response response = client().execute(ExplainDataLifecycleAction.INSTANCE, explainIndicesRequest)
+                .actionGet();
+            assertThat(response.getIndices().size(), is(1));
+            assertThat(response.getRolloverConditions(), nullValue());
+            for (ExplainIndexDataLifecycle explainIndex : response.getIndices()) {
+                assertThat(explainIndex.isManagedByDLM(), is(false));
+                assertThat(explainIndex.getIndex(), is(writeIndexName));
+                assertThat(explainIndex.getIndexCreationDate(), nullValue());
+                assertThat(explainIndex.getLifecycle(), nullValue());
+                assertThat(explainIndex.getGenerationTime(System::currentTimeMillis), nullValue());
+                assertThat(explainIndex.getRolloverDate(), nullValue());
+                assertThat(explainIndex.getTimeSinceRollover(System::currentTimeMillis), nullValue());
+
+                assertThat(explainIndex.getError(), nullValue());
+            }
+        });
+    }
+
+    static void indexDocs(String dataStream, int numDocs) {
+        BulkRequest bulkRequest = new BulkRequest();
+        for (int i = 0; i < numDocs; i++) {
+            String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis());
+            bulkRequest.add(
+                new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE)
+                    .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON)
+            );
+        }
+        BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet();
+        assertThat(bulkResponse.getItems().length, equalTo(numDocs));
+        String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream;
+        for (BulkItemResponse itemResponse : bulkResponse) {
+            assertThat(itemResponse.getFailureMessage(), nullValue());
+            assertThat(itemResponse.status(), equalTo(RestStatus.CREATED));
+            assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix));
+        }
+        client().admin().indices().refresh(new RefreshRequest(dataStream)).actionGet();
+    }
+
+    static void putComposableIndexTemplate(
+        String id,
+        @Nullable String mappings,
+        List<String> patterns,
+        @Nullable Settings settings,
+        @Nullable Map<String, Object> metadata,
+        @Nullable DataLifecycle lifecycle
+    ) throws IOException {
+        PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id);
+        request.indexTemplate(
+            new ComposableIndexTemplate(
+                patterns,
+                new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle),
+                null,
+                null,
+                null,
+                metadata,
+                new ComposableIndexTemplate.DataStreamTemplate(),
+                null
+            )
+        );
+        client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet();
+    }
+
+}

+ 3 - 0
modules/dlm/src/main/java/module-info.java

@@ -14,4 +14,7 @@ module org.elasticsearch.dlm {
     requires org.apache.logging.log4j;
 
     exports org.elasticsearch.dlm;
+    exports org.elasticsearch.dlm.action;
+    exports org.elasticsearch.dlm.rest;
+
 }

+ 47 - 2
modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java

@@ -10,21 +10,32 @@ package org.elasticsearch.dlm;
 
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.OriginSettingClient;
 import org.elasticsearch.cluster.metadata.DataLifecycle;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.core.IOUtils;
+import org.elasticsearch.dlm.action.ExplainDataLifecycleAction;
+import org.elasticsearch.dlm.action.TransportExplainDataLifecycleAction;
+import org.elasticsearch.dlm.rest.RestExplainDataLifecycleAction;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestHandler;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.tracing.Tracer;
@@ -33,6 +44,7 @@ import org.elasticsearch.xcontent.NamedXContentRegistry;
 
 import java.io.IOException;
 import java.time.Clock;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.function.Supplier;
@@ -45,6 +57,8 @@ import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN;
 public class DataLifecyclePlugin extends Plugin implements ActionPlugin {
 
     private final Settings settings;
+    private final SetOnce<DataLifecycleErrorStore> errorStoreInitialisationService = new SetOnce<>();
+
     private final SetOnce<DataLifecycleService> dataLifecycleInitialisationService = new SetOnce<>();
 
     public DataLifecyclePlugin(Settings settings) {
@@ -80,6 +94,7 @@ public class DataLifecyclePlugin extends Plugin implements ActionPlugin {
             return List.of();
         }
 
+        errorStoreInitialisationService.set(new DataLifecycleErrorStore());
         dataLifecycleInitialisationService.set(
             new DataLifecycleService(
                 settings,
@@ -88,11 +103,11 @@ public class DataLifecyclePlugin extends Plugin implements ActionPlugin {
                 getClock(),
                 threadPool,
                 threadPool::absoluteTimeInMillis,
-                new DataLifecycleErrorStore()
+                errorStoreInitialisationService.get()
             )
         );
         dataLifecycleInitialisationService.get().init();
-        return List.of(dataLifecycleInitialisationService.get());
+        return List.of(errorStoreInitialisationService.get(), dataLifecycleInitialisationService.get());
     }
 
     @Override
@@ -104,6 +119,36 @@ public class DataLifecyclePlugin extends Plugin implements ActionPlugin {
         return List.of(DataLifecycleService.DLM_POLL_INTERVAL_SETTING);
     }
 
+    @Override
+    public List<RestHandler> getRestHandlers(
+        Settings settings,
+        RestController restController,
+        ClusterSettings clusterSettings,
+        IndexScopedSettings indexScopedSettings,
+        SettingsFilter settingsFilter,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        Supplier<DiscoveryNodes> nodesInCluster
+    ) {
+        if (DataLifecycle.isEnabled() == false) {
+            return List.of();
+        }
+
+        List<RestHandler> handlers = new ArrayList<>();
+        handlers.add(new RestExplainDataLifecycleAction());
+        return handlers;
+    }
+
+    @Override
+    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+        if (DataLifecycle.isEnabled() == false) {
+            return List.of();
+        }
+
+        List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>();
+        actions.add(new ActionHandler<>(ExplainDataLifecycleAction.INSTANCE, TransportExplainDataLifecycleAction.class));
+        return actions;
+    }
+
     @Override
     public void close() throws IOException {
         try {

+ 205 - 0
modules/dlm/src/main/java/org/elasticsearch/dlm/action/ExplainDataLifecycleAction.java

@@ -0,0 +1,205 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.dlm.action;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
+import org.elasticsearch.action.dlm.ExplainIndexDataLifecycle;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeReadRequest;
+import org.elasticsearch.common.collect.Iterators;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContent;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Action for explaining the DLM lifecycle status for one or more indices.
+ */
+public class ExplainDataLifecycleAction extends ActionType<ExplainDataLifecycleAction.Response> {
+    public static final ExplainDataLifecycleAction INSTANCE = new ExplainDataLifecycleAction();
+    public static final String NAME = "indices:admin/dlm/explain";
+
+    public ExplainDataLifecycleAction() {
+        super(NAME, Response::new);
+    }
+
+    /**
+     * Request explaining the DLM lifecycle for one or more indices.
+     */
+    public static class Request extends MasterNodeReadRequest<Request> implements IndicesRequest.Replaceable {
+        private String[] names;
+        private boolean includeDefaults;
+        private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
+
+        public Request(String[] names) {
+            this(names, false);
+        }
+
+        public Request(String[] names, boolean includeDefaults) {
+            this.names = names;
+            this.includeDefaults = includeDefaults;
+        }
+
+        public boolean includeDefaults() {
+            return includeDefaults;
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            this.names = in.readOptionalStringArray();
+            this.indicesOptions = IndicesOptions.readIndicesOptions(in);
+            this.includeDefaults = in.readBoolean();
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeOptionalStringArray(names);
+            indicesOptions.writeIndicesOptions(out);
+            out.writeBoolean(includeDefaults);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+            Request request = (Request) o;
+            return includeDefaults == request.includeDefaults
+                && Arrays.equals(names, request.names)
+                && Objects.equals(indicesOptions, request.indicesOptions);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = Objects.hash(includeDefaults, indicesOptions);
+            result = 31 * result + Arrays.hashCode(names);
+            return result;
+        }
+
+        @Override
+        public String[] indices() {
+            return names;
+        }
+
+        @Override
+        public IndicesOptions indicesOptions() {
+            return indicesOptions;
+        }
+
+        @Override
+        public IndicesRequest indices(String... indices) {
+            this.names = indices;
+            return this;
+        }
+
+        public Request includeDefaults(boolean includeDefaults) {
+            this.includeDefaults = includeDefaults;
+            return this;
+        }
+
+        public Request indicesOptions(IndicesOptions indicesOptions) {
+            this.indicesOptions = indicesOptions;
+            return this;
+        }
+    }
+
+    /**
+     * Class representing the response for the explain DLM lifecycle action for one or more indices.
+     */
+    public static class Response extends ActionResponse implements ChunkedToXContentObject {
+        public static final ParseField INDICES_FIELD = new ParseField("indices");
+        private List<ExplainIndexDataLifecycle> indices;
+        @Nullable
+        private final RolloverConditions rolloverConditions;
+
+        public Response(List<ExplainIndexDataLifecycle> indices, @Nullable RolloverConditions rolloverConditions) {
+            this.indices = indices;
+            this.rolloverConditions = rolloverConditions;
+        }
+
+        public Response(StreamInput in) throws IOException {
+            super(in);
+            this.indices = in.readList(ExplainIndexDataLifecycle::new);
+            this.rolloverConditions = in.readOptionalWriteable(RolloverConditions::new);
+        }
+
+        public List<ExplainIndexDataLifecycle> getIndices() {
+            return indices;
+        }
+
+        public RolloverConditions getRolloverConditions() {
+            return rolloverConditions;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeList(indices);
+            out.writeOptionalWriteable(rolloverConditions);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+            Response response = (Response) o;
+            return Objects.equals(indices, response.indices) && Objects.equals(rolloverConditions, response.rolloverConditions);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(indices, rolloverConditions);
+        }
+
+        @Override
+        public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerParams) {
+            final Iterator<? extends ToXContent> indicesIterator = indices.stream()
+                .map(explainIndexDataLifecycle -> (ToXContent) (builder, params) -> {
+                    builder.field(explainIndexDataLifecycle.getIndex());
+                    explainIndexDataLifecycle.toXContent(builder, params, rolloverConditions);
+                    return builder;
+                })
+                .iterator();
+
+            return Iterators.concat(Iterators.single((builder, params) -> {
+                builder.startObject();
+                builder.startObject(INDICES_FIELD.getPreferredName());
+                return builder;
+            }), indicesIterator, Iterators.single((ToXContent) (builder, params) -> {
+                builder.endObject();
+                builder.endObject();
+                return builder;
+            }));
+        }
+    }
+}
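
The response implements `ChunkedToXContentObject`, so serialization is driven chunk by chunk. A sketch of draining the chunks into a single builder, mirroring what `ExplainDataLifecycleResponseTests` does below (in production the REST layer streams the chunks via `RestChunkedToXContentListener`); `render` is an illustrative helper name:

```
import java.io.IOException;
import java.io.UncheckedIOException;

import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;

import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;

// Drains the chunk iterator into one JSON builder; each chunk writes a fragment
// (object start, one entry per index, object end).
static XContentBuilder render(ExplainDataLifecycleAction.Response response) throws IOException {
    XContentBuilder builder = XContentFactory.jsonBuilder();
    response.toXContentChunked(EMPTY_PARAMS).forEachRemaining(chunk -> {
        try {
            chunk.toXContent(builder, EMPTY_PARAMS);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    });
    return builder;
}
```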

+ 122 - 0
modules/dlm/src/main/java/org/elasticsearch/dlm/action/TransportExplainDataLifecycleAction.java

@@ -0,0 +1,122 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.dlm.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
+import org.elasticsearch.action.dlm.ExplainIndexDataLifecycle;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.IndexAbstraction;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.dlm.DataLifecycleErrorStore;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Transport action handling the explain DLM lifecycle requests for one or more DLM managed indices.
+ */
+public class TransportExplainDataLifecycleAction extends TransportMasterNodeReadAction<
+    ExplainDataLifecycleAction.Request,
+    ExplainDataLifecycleAction.Response> {
+
+    private final DataLifecycleErrorStore errorStore;
+
+    @Inject
+    public TransportExplainDataLifecycleAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        DataLifecycleErrorStore dataLifecycleServiceErrorStore
+    ) {
+        super(
+            ExplainDataLifecycleAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            ExplainDataLifecycleAction.Request::new,
+            indexNameExpressionResolver,
+            ExplainDataLifecycleAction.Response::new,
+            ThreadPool.Names.MANAGEMENT
+        );
+        this.errorStore = dataLifecycleServiceErrorStore;
+    }
+
+    @Override
+    protected void masterOperation(
+        Task task,
+        ExplainDataLifecycleAction.Request request,
+        ClusterState state,
+        ActionListener<ExplainDataLifecycleAction.Response> listener
+    ) throws Exception {
+
+        String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
+        List<ExplainIndexDataLifecycle> explainIndices = new ArrayList<>(concreteIndices.length);
+        Metadata metadata = state.metadata();
+        for (String index : concreteIndices) {
+            IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(index);
+            if (indexAbstraction == null) {
+                continue;
+            }
+            IndexMetadata idxMetadata = metadata.index(index);
+            if (idxMetadata == null) {
+                continue;
+            }
+            DataStream parentDataStream = indexAbstraction.getParentDataStream();
+            if (parentDataStream == null || parentDataStream.isIndexManagedByDLM(idxMetadata.getIndex(), metadata::index) == false) {
+                explainIndices.add(new ExplainIndexDataLifecycle(index, false, null, null, null, null));
+                continue;
+            }
+
+            RolloverInfo rolloverInfo = idxMetadata.getRolloverInfos().get(parentDataStream.getName());
+            ExplainIndexDataLifecycle explainIndexDataLifecycle = new ExplainIndexDataLifecycle(
+                index,
+                true,
+                idxMetadata.getCreationDate(),
+                rolloverInfo == null ? null : rolloverInfo.getTime(),
+                parentDataStream.getLifecycle(),
+                errorStore.getError(index)
+            );
+            explainIndices.add(explainIndexDataLifecycle);
+        }
+
+        ClusterSettings clusterSettings = clusterService.getClusterSettings();
+        listener.onResponse(
+            new ExplainDataLifecycleAction.Response(
+                explainIndices,
+                request.includeDefaults() && DataLifecycle.isEnabled()
+                    ? clusterSettings.get(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING)
+                    : null
+            )
+        );
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(ExplainDataLifecycleAction.Request request, ClusterState state) {
+        return state.blocks()
+            .indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
+    }
+}
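
`DataLifecycleErrorStore` itself is not part of this diff; a minimal sketch of the contract the transport action relies on (`getError` returning the last recorded failure for an index, or `null`), assuming a simple in-memory map:

```
import java.util.concurrent.ConcurrentHashMap;

import org.elasticsearch.core.Nullable;

// Hypothetical stand-in for DataLifecycleErrorStore: keyed by index name,
// recording the most recent DLM failure so explain can surface it.
public class ErrorStoreSketch {
    private final ConcurrentHashMap<String, String> indexToError = new ConcurrentHashMap<>();

    public void recordError(String indexName, Exception e) {
        indexToError.put(indexName, e.getMessage());
    }

    public void clearError(String indexName) {
        indexToError.remove(indexName);
    }

    @Nullable
    public String getError(String indexName) {
        return indexToError.get(indexName);
    }
}
```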

+ 55 - 0
modules/dlm/src/main/java/org/elasticsearch/dlm/rest/RestExplainDataLifecycleAction.java

@@ -0,0 +1,55 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.dlm.rest;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.dlm.action.ExplainDataLifecycleAction;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.Scope;
+import org.elasticsearch.rest.ServerlessScope;
+import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+@ServerlessScope(Scope.PUBLIC)
+public class RestExplainDataLifecycleAction extends BaseRestHandler {
+
+    @Override
+    public String getName() {
+        return "dlm_explain_action";
+    }
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(GET, "/{index}/_lifecycle/explain"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
+        String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index"));
+        ExplainDataLifecycleAction.Request explainRequest = new ExplainDataLifecycleAction.Request(indices);
+        explainRequest.includeDefaults(restRequest.paramAsBoolean("include_defaults", false));
+        explainRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen()));
+        String masterNodeTimeout = restRequest.param("master_timeout");
+        if (masterNodeTimeout != null) {
+            explainRequest.masterNodeTimeout(masterNodeTimeout);
+        }
+        return channel -> client.execute(ExplainDataLifecycleAction.INSTANCE, explainRequest, new RestChunkedToXContentListener<>(channel));
+    }
+
+    @Override
+    public boolean allowSystemIndexAccessByDefault() {
+        return true;
+    }
+}
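
A sketch of exercising the endpoint over HTTP with the low-level Java REST client; the path and `include_defaults` parameter match the route and parameter registered by the handler above (`explainOverHttp` is an illustrative helper name):

```
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// GET <index>/_lifecycle/explain?include_defaults=true and return the raw JSON body.
static String explainOverHttp(RestClient restClient, String index) throws Exception {
    Request request = new Request("GET", "/" + index + "/_lifecycle/explain");
    request.addParameter("include_defaults", "true");
    Response response = restClient.performRequest(request);
    return EntityUtils.toString(response.getEntity());
}
```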

+ 218 - 0
modules/dlm/src/test/java/org/elasticsearch/dlm/action/ExplainDataLifecycleResponseTests.java

@@ -0,0 +1,218 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.dlm.action;
+
+import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition;
+import org.elasticsearch.action.admin.indices.rollover.MinPrimaryShardDocsCondition;
+import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
+import org.elasticsearch.action.dlm.ExplainIndexDataLifecycle;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.indices.IndicesModule;
+import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.dlm.action.ExplainDataLifecycleAction.Response;
+import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class ExplainDataLifecycleResponseTests extends AbstractWireSerializingTestCase<Response> {
+
+    private NamedWriteableRegistry namedWriteableRegistry;
+    private NamedXContentRegistry xContentRegistry;
+
+    @Before
+    public void setupNamedWriteableRegistry() {
+        namedWriteableRegistry = new NamedWriteableRegistry(IndicesModule.getNamedWriteables());
+        xContentRegistry = new NamedXContentRegistry(IndicesModule.getNamedXContents());
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        return xContentRegistry;
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return namedWriteableRegistry;
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testToXContent() throws IOException {
+        long now = System.currentTimeMillis();
+        DataLifecycle lifecycle = new DataLifecycle();
+        ExplainIndexDataLifecycle explainIndex = createRandomIndexDLMExplanation(now, lifecycle);
+        explainIndex.setNowSupplier(() -> now);
+        {
+            Response response = new Response(List.of(explainIndex), null);
+
+            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+            response.toXContentChunked(EMPTY_PARAMS).forEachRemaining(xcontent -> {
+                try {
+                    xcontent.toXContent(builder, EMPTY_PARAMS);
+                } catch (IOException e) {
+                    logger.error(e.getMessage(), e);
+                    fail(e.getMessage());
+                }
+            });
+            Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
+            Map<String, Object> indices = (Map<String, Object>) xContentMap.get("indices");
+            assertThat(indices.size(), is(1));
+            Map<String, Object> explainIndexMap = (Map<String, Object>) indices.get(explainIndex.getIndex());
+            assertThat(explainIndexMap.get("managed_by_dlm"), is(explainIndex.isManagedByDLM()));
+            if (explainIndex.isManagedByDLM()) {
+                assertThat(explainIndexMap.get("index_creation_date_millis"), is(explainIndex.getIndexCreationDate()));
+                assertThat(
+                    explainIndexMap.get("time_since_index_creation"),
+                    is(explainIndex.getTimeSinceIndexCreation(() -> now).toHumanReadableString(2))
+                );
+                if (explainIndex.getRolloverDate() != null) {
+                    assertThat(explainIndexMap.get("rollover_date_millis"), is(explainIndex.getRolloverDate()));
+                    assertThat(
+                        explainIndexMap.get("time_since_rollover"),
+                        is(explainIndex.getTimeSinceRollover(() -> now).toHumanReadableString(2))
+                    );
+                    assertThat(
+                        explainIndexMap.get("generation_time"),
+                        is(explainIndex.getGenerationTime(() -> now).toHumanReadableString(2))
+                    );
+                } else {
+                    assertThat(explainIndexMap.get("generation_time"), is(nullValue()));
+                }
+                assertThat(explainIndexMap.get("lifecycle"), is(new HashMap<>())); // empty lifecycle
+                assertThat(explainIndexMap.get("error"), is(explainIndex.getError()));
+            }
+        }
+
+        {
+            // let's add some rollover conditions (i.e. include defaults)
+            RolloverConditions rolloverConditions = new RolloverConditions(
+                Map.of(
+                    MaxPrimaryShardDocsCondition.NAME,
+                    new MaxPrimaryShardDocsCondition(9L),
+                    MinPrimaryShardDocsCondition.NAME,
+                    new MinPrimaryShardDocsCondition(4L)
+                )
+            );
+            Response response = new Response(List.of(explainIndex), rolloverConditions);
+
+            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+            response.toXContentChunked(EMPTY_PARAMS).forEachRemaining(xcontent -> {
+                try {
+                    xcontent.toXContent(builder, EMPTY_PARAMS);
+                } catch (IOException e) {
+                    logger.error(e.getMessage(), e);
+                    fail(e.getMessage());
+                }
+            });
+            Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
+            Map<String, Object> indices = (Map<String, Object>) xContentMap.get("indices");
+            assertThat(indices.size(), is(1));
+            Map<String, Object> explainIndexMap = (Map<String, Object>) indices.get(explainIndex.getIndex());
+            assertThat(explainIndexMap.get("index"), is(explainIndex.getIndex()));
+            assertThat(explainIndexMap.get("managed_by_dlm"), is(explainIndex.isManagedByDLM()));
+            if (explainIndex.isManagedByDLM()) {
+                assertThat(explainIndexMap.get("index_creation_date_millis"), is(explainIndex.getIndexCreationDate()));
+                assertThat(
+                    explainIndexMap.get("time_since_index_creation"),
+                    is(explainIndex.getTimeSinceIndexCreation(() -> now).toHumanReadableString(2))
+                );
+                if (explainIndex.getRolloverDate() != null) {
+                    assertThat(explainIndexMap.get("rollover_date_millis"), is(explainIndex.getRolloverDate()));
+                    assertThat(
+                        explainIndexMap.get("time_since_rollover"),
+                        is(explainIndex.getTimeSinceRollover(() -> now).toHumanReadableString(2))
+                    );
+                    assertThat(
+                        explainIndexMap.get("generation_time"),
+                        is(explainIndex.getGenerationTime(() -> now).toHumanReadableString(2))
+                    );
+                } else {
+                    assertThat(explainIndexMap.get("generation_time"), is(nullValue()));
+                }
+                assertThat(explainIndexMap.get("error"), is(explainIndex.getError()));
+
+                Map<String, Object> lifecycleRollover = (Map<String, Object>) ((Map<String, Object>) explainIndexMap.get("lifecycle")).get(
+                    "rollover"
+                );
+                assertThat(lifecycleRollover.get("min_primary_shard_docs"), is(4));
+                assertThat(lifecycleRollover.get("max_primary_shard_docs"), is(9));
+            }
+        }
+    }
+
+    public void testChunkCount() {
+        long now = System.currentTimeMillis();
+        DataLifecycle lifecycle = new DataLifecycle();
+        Response response = new Response(
+            List.of(
+                createRandomIndexDLMExplanation(now, lifecycle),
+                createRandomIndexDLMExplanation(now, lifecycle),
+                createRandomIndexDLMExplanation(now, lifecycle)
+            ),
+            null
+        );
+
+        // 2 chunks are the opening and closing of the json object
+        // one chunk per index in the response accounts for the other 3 chunks
+        AbstractChunkedSerializingTestCase.assertChunkCount(response, ignored -> 5);
+    }
+
+    private static ExplainIndexDataLifecycle createRandomIndexDLMExplanation(long now, @Nullable DataLifecycle lifecycle) {
+        return new ExplainIndexDataLifecycle(
+            randomAlphaOfLengthBetween(10, 30),
+            true,
+            now,
+            randomBoolean() ? now + TimeValue.timeValueDays(1).getMillis() : null,
+            lifecycle,
+            randomBoolean() ? new NullPointerException("bad times").getMessage() : null
+        );
+    }
+
+    @Override
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
+    }
+
+    @Override
+    protected Response createTestInstance() {
+        return randomResponse();
+    }
+
+    @Override
+    protected Response mutateInstance(Response instance) throws IOException {
+        return randomResponse();
+    }
+
+    private Response randomResponse() {
+        return new Response(
+            List.of(createRandomIndexDLMExplanation(System.nanoTime(), randomBoolean() ? new DataLifecycle() : null)),
+            randomBoolean()
+                ? new RolloverConditions(
+                    Map.of(MaxPrimaryShardDocsCondition.NAME, new MaxPrimaryShardDocsCondition(randomLongBetween(1000, 199_999_000)))
+                )
+                : null
+        );
+    }
+}
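
The chunk count asserted in `testChunkCount` above encodes a 2 + N scheme: one chunk opens the response object, one chunk is emitted per index, and one chunk closes it. As a rough illustration only (the shipped `Response#toXContentChunked` lives in `ExplainDataLifecycleAction` and is not part of this hunk), such an iterator could be assembled like so, assuming the `Iterators` chunking helpers used elsewhere in the codebase:

```java
import java.util.Iterator;
import java.util.List;

import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
import org.elasticsearch.action.dlm.ExplainIndexDataLifecycle;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.xcontent.ToXContent;

// Hypothetical sketch of a 2 + N chunk iterator; not the actual Response code.
static Iterator<ToXContent> chunks(List<ExplainIndexDataLifecycle> indices, RolloverConditions rolloverConditions) {
    return Iterators.concat(
        // chunk 1: open the top-level object and the "indices" map
        Iterators.single((builder, params) -> builder.startObject().startObject("indices")),
        // one chunk per index: the index name keyed to its explain object
        indices.stream().<ToXContent>map(explainIndex -> (builder, params) -> {
            builder.field(explainIndex.getIndex());
            return explainIndex.toXContent(builder, params, rolloverConditions);
        }).iterator(),
        // final chunk: close both objects
        Iterators.single((builder, params) -> builder.endObject().endObject())
    );
}
```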

+ 40 - 0
modules/dlm/src/yamlRestTest/java/org/elasticsearch/dlm/DlmRestIT.java

@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.dlm;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+
+import org.apache.lucene.tests.util.TimeUnits;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+
+@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) // the default suite timeout is not enough on the Jenkins VMs
+public class DlmRestIT extends ESClientYamlSuiteTestCase {
+
+    public DlmRestIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return createParameters();
+    }
+
+    private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", new SecureString("x-pack-test-password"));
+
+    @Override
+    protected Settings restClientSettings() {
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build();
+    }
+}
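
For context, `restClientSettings()` above injects a pre-computed basic-auth `Authorization` header into the client's thread context. A standalone sketch of the equivalent setup with the low-level `RestClient` (host and port are assumptions for illustration):

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.RestClient;

// Builds the same "Basic <base64(user:password)>" header value the test uses
// and attaches it to every request issued by the client.
static RestClient basicAuthClient() {
    String token = Base64.getEncoder()
        .encodeToString("x_pack_rest_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8));
    return RestClient.builder(new HttpHost("localhost", 9200))
        .setDefaultHeaders(new Header[] { new BasicHeader("Authorization", "Basic " + token) })
        .build();
}
```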

+ 53 - 0
modules/dlm/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_explain_lifecycle.yml

@@ -0,0 +1,53 @@
+---
+"Explain backing index lifecycle":
+  - skip:
+      version: " - 8.7.99"
+      reason: "Explain data lifecycle API was added in 8.8"
+      features: allowed_warnings
+  - do:
+      allowed_warnings:
+        - "index template [template-with-lifecycle] has index patterns [dlm-managed-data-stream] matching patterns from existing older templates [global] with patterns (global => [*]); this template [template-with-lifecycle] will take precedence during new index creation"
+      indices.put_index_template:
+        name: template-with-lifecycle
+        body:
+          index_patterns: [ dlm-managed-data-stream ]
+          template:
+            settings:
+              index.number_of_replicas: 0
+            lifecycle:
+              data_retention: "30d"
+          data_stream: { }
+  - do:
+      indices.create_data_stream:
+        name: dlm-managed-data-stream
+  - is_true: acknowledged
+
+  - do:
+      indices.get_data_stream:
+        name: "dlm-managed-data-stream"
+  - match: { data_streams.0.name: dlm-managed-data-stream }
+  - match: { data_streams.0.generation: 1 }
+  - length: { data_streams.0.indices: 1 }
+  - match: { data_streams.0.indices.0.index_name: '/\.ds-dlm-managed-data-stream-(\d{4}\.\d{2}\.\d{2}-)?000001/' }
+  - set:
+      data_streams.0.indices.0.index_name: backing_index
+
+  - do:
+      dlm.explain_lifecycle:
+        index: $backing_index
+  - match: { indices.$backing_index.managed_by_dlm: true }
+  - match: { indices.$backing_index.lifecycle.data_retention: '30d' }
+  - is_false: indices.$backing_index.lifecycle.rollover
+
+
+  - do:
+      dlm.explain_lifecycle:
+        index: $backing_index
+        include_defaults: true
+  - match: { indices.$backing_index.managed_by_dlm: true }
+  - match: { indices.$backing_index.lifecycle.data_retention: '30d' }
+  - is_true: indices.$backing_index.lifecycle.rollover
+
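
The two explain calls in the YAML test above can also be issued directly; a minimal sketch with the low-level REST client (the `RestClient` comes from the previous sketch, and `backingIndex` is the resolved backing index name):

```java
import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Issues the same two calls as the YAML test; endpoint and parameter names
// follow the dlm.explain_lifecycle spec added below.
static void explainBackingIndex(RestClient client, String backingIndex) throws IOException {
    // POST /{index}/_lifecycle/explain
    Request explain = new Request("POST", "/" + backingIndex + "/_lifecycle/explain");
    Response response = client.performRequest(explain);

    // With include_defaults=true the response additionally surfaces the default
    // rollover conditions under indices.<index>.lifecycle.rollover.
    Request withDefaults = new Request("POST", "/" + backingIndex + "/_lifecycle/explain");
    withDefaults.addParameter("include_defaults", "true");
    Response responseWithDefaults = client.performRequest(withDefaults);
}
```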

+ 37 - 0
rest-api-spec/src/main/resources/rest-api-spec/api/dlm.explain_lifecycle.json

@@ -0,0 +1,37 @@
+{
+  "dlm.explain_lifecycle": {
+    "documentation": {
+      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/dlm-explain-lifecycle.html",
+      "description": "Retrieves information about the index's current DLM lifecycle, such as any potential encountered error, time since creation etc."
+    },
+    "stability": "experimental",
+    "visibility": "public",
+    "headers": {
+      "accept": [
+        "application/json"
+      ]
+    },
+    "url": {
+      "paths": [
+        {
+          "path": "/{index}/_lifecycle/explain",
+          "methods": [
+            "POST"
+          ],
+          "parts": {
+            "index": {
+              "type": "string",
+              "description": "The name of the index to explain"
+            }
+          }
+        }
+      ]
+    },
+    "params": {
+      "include_defaults": {
+        "type": "boolean",
+        "description": "indicates if the API should return the default values the system uses for the index's lifecycle"
+      }
+    }
+  }
+}

+ 2 - 0
server/src/main/java/module-info.java

@@ -364,6 +364,8 @@ module org.elasticsearch.server {
 
     opens org.elasticsearch.common.logging to org.apache.logging.log4j.core;
 
+    exports org.elasticsearch.action.dlm;
+
     provides java.util.spi.CalendarDataProvider with org.elasticsearch.common.time.IsoCalendarDataProvider;
     provides org.elasticsearch.xcontent.ErrorOnUnknown with org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown;
     provides org.elasticsearch.xcontent.XContentBuilderExtension with org.elasticsearch.common.xcontent.XContentElasticsearchExtension;

+ 228 - 0
server/src/main/java/org/elasticsearch/action/dlm/ExplainIndexDataLifecycle.java

@@ -0,0 +1,228 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.dlm;
+
+import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+/**
+ * Encapsulates the information that describes an index from the perspective of its DLM lifecycle.
+ */
+public class ExplainIndexDataLifecycle implements Writeable, ToXContentObject {
+    private static final ParseField INDEX_FIELD = new ParseField("index");
+    private static final ParseField MANAGED_BY_DLM_FIELD = new ParseField("managed_by_dlm");
+    private static final ParseField INDEX_CREATION_DATE_MILLIS_FIELD = new ParseField("index_creation_date_millis");
+    private static final ParseField INDEX_CREATION_DATE_FIELD = new ParseField("index_creation_date");
+    private static final ParseField ROLLOVER_DATE_MILLIS_FIELD = new ParseField("rollover_date_millis");
+    private static final ParseField ROLLOVER_DATE_FIELD = new ParseField("rollover_date");
+    private static final ParseField TIME_SINCE_INDEX_CREATION_FIELD = new ParseField("time_since_index_creation");
+    private static final ParseField TIME_SINCE_ROLLOVER_FIELD = new ParseField("time_since_rollover");
+    private static final ParseField GENERATION_TIME = new ParseField("generation_time");
+    private static final ParseField LIFECYCLE_FIELD = new ParseField("lifecycle");
+    private static final ParseField ERROR_FIELD = new ParseField("error");
+
+    private final String index;
+    private final boolean managedByDLM;
+    @Nullable
+    private final Long indexCreationDate;
+    @Nullable
+    private final Long rolloverDate;
+    @Nullable
+    private final DataLifecycle lifecycle;
+    @Nullable
+    private final String error;
+    private Supplier<Long> nowSupplier = System::currentTimeMillis;
+
+    public ExplainIndexDataLifecycle(
+        String index,
+        boolean managedByDLM,
+        @Nullable Long indexCreationDate,
+        @Nullable Long rolloverDate,
+        @Nullable DataLifecycle lifecycle,
+        @Nullable String error
+    ) {
+        this.index = index;
+        this.managedByDLM = managedByDLM;
+        this.indexCreationDate = indexCreationDate;
+        this.rolloverDate = rolloverDate;
+        this.lifecycle = lifecycle;
+        this.error = error;
+    }
+
+    public ExplainIndexDataLifecycle(StreamInput in) throws IOException {
+        this.index = in.readString();
+        this.managedByDLM = in.readBoolean();
+        if (managedByDLM) {
+            this.indexCreationDate = in.readOptionalLong();
+            this.rolloverDate = in.readOptionalLong();
+            this.lifecycle = in.readOptionalWriteable(DataLifecycle::new);
+            this.error = in.readOptionalString();
+        } else {
+            this.indexCreationDate = null;
+            this.rolloverDate = null;
+            this.lifecycle = null;
+            this.error = null;
+        }
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return toXContent(builder, params, null);
+    }
+
+    public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable RolloverConditions rolloverConditions)
+        throws IOException {
+        builder.startObject();
+        builder.field(INDEX_FIELD.getPreferredName(), index);
+        builder.field(MANAGED_BY_DLM_FIELD.getPreferredName(), managedByDLM);
+        if (managedByDLM) {
+            if (indexCreationDate != null) {
+                builder.timeField(
+                    INDEX_CREATION_DATE_MILLIS_FIELD.getPreferredName(),
+                    INDEX_CREATION_DATE_FIELD.getPreferredName(),
+                    indexCreationDate
+                );
+                builder.field(
+                    TIME_SINCE_INDEX_CREATION_FIELD.getPreferredName(),
+                    getTimeSinceIndexCreation(nowSupplier).toHumanReadableString(2)
+                );
+            }
+            if (rolloverDate != null) {
+                builder.timeField(ROLLOVER_DATE_MILLIS_FIELD.getPreferredName(), ROLLOVER_DATE_FIELD.getPreferredName(), rolloverDate);
+                builder.field(TIME_SINCE_ROLLOVER_FIELD.getPreferredName(), getTimeSinceRollover(nowSupplier).toHumanReadableString(2));
+                // if the index has been rolled over we'll start reporting the generation time
+                builder.field(GENERATION_TIME.getPreferredName(), getGenerationTime(nowSupplier).toHumanReadableString(2));
+            }
+            if (this.lifecycle != null) {
+                builder.field(LIFECYCLE_FIELD.getPreferredName());
+                lifecycle.toXContent(builder, params, rolloverConditions);
+            }
+            if (this.error != null) {
+                builder.field(ERROR_FIELD.getPreferredName(), error);
+            }
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(index);
+        out.writeBoolean(managedByDLM);
+        if (managedByDLM) {
+            out.writeOptionalLong(indexCreationDate);
+            out.writeOptionalLong(rolloverDate);
+            out.writeOptionalWriteable(lifecycle);
+            out.writeOptionalString(error);
+        }
+    }
+
+    /**
+     * Calculates the time since this index started progressing towards the remainder of its lifecycle past rollover.
+     * Every index has to be rolled over before it starts progressing towards the retention part of its lifecycle.
+     * If the index has not been rolled over, this will return null.
+     * In the future, this will also take the index's origination date into account (however, it will still only be
+     * displayed after the index is rolled over).
+     */
+    @Nullable
+    public TimeValue getGenerationTime(Supplier<Long> now) {
+        if (rolloverDate == null) {
+            return null;
+        }
+        return TimeValue.timeValueMillis(Math.max(0L, now.get() - rolloverDate));
+    }
+
+    /**
+     * Calculates the time elapsed since the index was created.
+     * It can be null, as we don't serialise the index creation date field for unmanaged indices.
+     */
+    @Nullable
+    public TimeValue getTimeSinceIndexCreation(Supplier<Long> now) {
+        if (indexCreationDate == null) {
+            // unmanaged index
+            return null;
+        }
+        return TimeValue.timeValueMillis(Math.max(0L, now.get() - indexCreationDate));
+    }
+
+    /**
+     * Calculates the time elapsed since the index was rolled over.
+     * It can be null if the index has not been rolled over, or for unmanaged indices, as we don't serialise the rollover date field.
+     */
+    @Nullable
+    public TimeValue getTimeSinceRollover(Supplier<Long> now) {
+        if (rolloverDate == null) {
+            return null;
+        }
+        return TimeValue.timeValueMillis(Math.max(0L, now.get() - rolloverDate));
+    }
+
+    public String getIndex() {
+        return index;
+    }
+
+    public boolean isManagedByDLM() {
+        return managedByDLM;
+    }
+
+    public Long getIndexCreationDate() {
+        return indexCreationDate;
+    }
+
+    public Long getRolloverDate() {
+        return rolloverDate;
+    }
+
+    public DataLifecycle getLifecycle() {
+        return lifecycle;
+    }
+
+    public String getError() {
+        return error;
+    }
+
+    // public for testing purposes only
+    public void setNowSupplier(Supplier<Long> nowSupplier) {
+        this.nowSupplier = nowSupplier;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        ExplainIndexDataLifecycle that = (ExplainIndexDataLifecycle) o;
+        return managedByDLM == that.managedByDLM
+            && Objects.equals(index, that.index)
+            && Objects.equals(indexCreationDate, that.indexCreationDate)
+            && Objects.equals(rolloverDate, that.rolloverDate)
+            && Objects.equals(lifecycle, that.lifecycle)
+            && Objects.equals(error, that.error);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(index, managedByDLM, indexCreationDate, rolloverDate, lifecycle, error);
+    }
+}
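
To make the field layout concrete, here is a minimal sketch (not part of the change) that builds an instance with the values from the PR description and renders it through `toXContent`:

```java
import java.io.IOException;

import org.elasticsearch.action.dlm.ExplainIndexDataLifecycle;
import org.elasticsearch.cluster.metadata.DataLifecycle;
import org.elasticsearch.common.Strings;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;

// Builds the first backing index from the PR description and prints its JSON.
static void printExplainExample() throws IOException {
    ExplainIndexDataLifecycle explain = new ExplainIndexDataLifecycle(
        ".ds-metrics-foo-2023.03.22-000001",
        true,                // managed by DLM
        1679475563571L,      // index creation date, millis
        1679475564293L,      // rollover date, millis
        new DataLifecycle(),
        null                 // no error recorded
    );
    XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
    explain.toXContent(builder, ToXContent.EMPTY_PARAMS);
    // Prints the per-index object from the PR description, including the
    // human-readable time_since_* and generation_time fields derived from
    // the millis values above via the nowSupplier.
    System.out.println(Strings.toString(builder));
}
```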

+ 144 - 0
server/src/test/java/org/elasticsearch/action/dlm/ExplainIndexDataLifecycleTests.java

@@ -0,0 +1,144 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.dlm;
+
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class ExplainIndexDataLifecycleTests extends AbstractWireSerializingTestCase<ExplainIndexDataLifecycle> {
+
+    public void testGetAge() {
+        long now = System.currentTimeMillis();
+        {
+            ExplainIndexDataLifecycle randomIndexDLMExplanation = createManagedIndexDLMExplanation(now, new DataLifecycle());
+            if (randomIndexDLMExplanation.getRolloverDate() == null) {
+                // generation time is null for non-rolled indices
+                assertThat(randomIndexDLMExplanation.getGenerationTime(() -> now + 50L), is(nullValue()));
+            } else {
+                assertThat(
+                    randomIndexDLMExplanation.getGenerationTime(() -> randomIndexDLMExplanation.getRolloverDate() + 75L),
+                    is(TimeValue.timeValueMillis(75))
+                );
+            }
+        }
+        {
+            // null for unmanaged index
+            ExplainIndexDataLifecycle indexDataLifecycle = new ExplainIndexDataLifecycle("my-index", false, null, null, null, null);
+            assertThat(indexDataLifecycle.getGenerationTime(() -> now), is(nullValue()));
+        }
+
+        {
+            // should always be gte 0
+            ExplainIndexDataLifecycle indexDataLifecycle = new ExplainIndexDataLifecycle(
+                "my-index",
+                true,
+                now,
+                now + 80L, // rolled over in the future (clocks are funny that way)
+                new DataLifecycle(),
+                null
+            );
+            assertThat(indexDataLifecycle.getGenerationTime(() -> now), is(TimeValue.ZERO));
+        }
+    }
+
+    public void testGetTimeSinceIndexCreation() {
+        long now = System.currentTimeMillis();
+        {
+            ExplainIndexDataLifecycle randomIndexDLMExplanation = createManagedIndexDLMExplanation(now, new DataLifecycle());
+            assertThat(randomIndexDLMExplanation.getTimeSinceIndexCreation(() -> now + 75L), is(TimeValue.timeValueMillis(75)));
+        }
+        {
+            // null for unmanaged index
+            ExplainIndexDataLifecycle indexDataLifecycle = new ExplainIndexDataLifecycle("my-index", false, null, null, null, null);
+            assertThat(indexDataLifecycle.getTimeSinceIndexCreation(() -> now), is(nullValue()));
+        }
+
+        {
+            // should always be gte 0
+            ExplainIndexDataLifecycle indexDataLifecycle = new ExplainIndexDataLifecycle(
+                "my-index",
+                true,
+                now + 80L, // created in the future (clocks are funny that way)
+                null,
+                new DataLifecycle(),
+                null
+            );
+            assertThat(indexDataLifecycle.getTimeSinceIndexCreation(() -> now), is(TimeValue.ZERO));
+        }
+    }
+
+    public void testGetTimeSinceRollover() {
+        long now = System.currentTimeMillis();
+        {
+            ExplainIndexDataLifecycle randomIndexDLMExplanation = createManagedIndexDLMExplanation(now, new DataLifecycle());
+            if (randomIndexDLMExplanation.getRolloverDate() == null) {
+                // time since rollover is null for non-rolled indices
+                assertThat(randomIndexDLMExplanation.getTimeSinceRollover(() -> now + 50L), is(nullValue()));
+            } else {
+                assertThat(
+                    randomIndexDLMExplanation.getTimeSinceRollover(() -> randomIndexDLMExplanation.getRolloverDate() + 75L),
+                    is(TimeValue.timeValueMillis(75))
+                );
+            }
+        }
+        {
+            // null for unmanaged index
+            ExplainIndexDataLifecycle indexDataLifecycle = new ExplainIndexDataLifecycle("my-index", false, null, null, null, null);
+            assertThat(indexDataLifecycle.getTimeSinceRollover(() -> now), is(nullValue()));
+        }
+
+        {
+            // should always be gte 0
+            ExplainIndexDataLifecycle indexDataLifecycle = new ExplainIndexDataLifecycle(
+                "my-index",
+                true,
+                now - 50L,
+                now + 100L, // rolled over in the future
+                new DataLifecycle(),
+                null
+            );
+            assertThat(indexDataLifecycle.getTimeSinceRollover(() -> now), is(TimeValue.ZERO));
+        }
+    }
+
+    @Override
+    protected Writeable.Reader<ExplainIndexDataLifecycle> instanceReader() {
+        return ExplainIndexDataLifecycle::new;
+    }
+
+    @Override
+    protected ExplainIndexDataLifecycle createTestInstance() {
+        return createManagedIndexDLMExplanation(System.nanoTime(), randomBoolean() ? new DataLifecycle() : null);
+    }
+
+    @Override
+    protected ExplainIndexDataLifecycle mutateInstance(ExplainIndexDataLifecycle instance) throws IOException {
+        return createManagedIndexDLMExplanation(System.nanoTime(), randomBoolean() ? new DataLifecycle() : null);
+    }
+
+    private static ExplainIndexDataLifecycle createManagedIndexDLMExplanation(long now, @Nullable DataLifecycle lifecycle) {
+        return new ExplainIndexDataLifecycle(
+            randomAlphaOfLengthBetween(10, 30),
+            true,
+            now,
+            randomBoolean() ? now + TimeValue.timeValueDays(1).getMillis() : null,
+            lifecycle,
+            randomBoolean() ? new NullPointerException("bad times").getMessage() : null
+        );
+    }
+
+}
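
The "clocks are funny that way" cases above pin down the `Math.max(0L, ...)` clamp in the production getters. Restated as a tiny standalone helper (class and method names are hypothetical):

```java
import java.util.function.LongSupplier;

// Standalone restatement of the clamp used by the getters: elapsed time is
// never negative, even if the recorded timestamp lies in the future relative
// to the supplied clock.
final class ElapsedTimeSketch {
    static long elapsedMillis(long eventMillis, LongSupplier now) {
        return Math.max(0L, now.getAsLong() - eventMillis);
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(elapsedMillis(now + 100L, () -> now)); // 0, clamped
        System.out.println(elapsedMillis(now - 75L, () -> now));  // 75
    }
}
```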

+ 1 - 0
x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java

@@ -383,6 +383,7 @@ public class Constants {
         "indices:admin/data_stream/modify",
         "indices:admin/data_stream/promote",
         "indices:admin/delete",
+        "indices:admin/dlm/explain",
         "indices:admin/flush",
         "indices:admin/flush[s]",
         "indices:admin/forcemerge",