
Trim stored fields for _id field in tsdb (#97409)

In the fetch phase, the _id is synthesized on the fly.

The _id is composed of a hash of the routing fields, the _tsid and the timestamp. These are all properties that can be retrieved from doc values and used to generate the _id on the fly.
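
A minimal sketch of the idea, not the actual implementation (which lives in IdLoader.TsIdLoader below): read the _tsid and @timestamp doc values for a document and derive an id from them. The field names follow tsdb conventions, and the toy encoding at the end merely stands in for TsidExtractingIdFieldMapper.createId, which also folds in the routing hash.

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.BytesRef;

import java.io.IOException;
import java.util.Arrays;
import java.util.Base64;

class SyntheticIdSketch {
    // Synthesize an id for one segment-level doc id purely from doc values.
    static String synthesizeId(LeafReader reader, int docId) throws IOException {
        SortedDocValues tsidValues = DocValues.getSorted(reader, "_tsid");
        SortedNumericDocValues timestampValues = DocValues.getSortedNumeric(reader, "@timestamp");
        if (tsidValues.advanceExact(docId) == false || timestampValues.advanceExact(docId) == false) {
            throw new IllegalStateException("every tsdb document has exactly one _tsid and one @timestamp");
        }
        BytesRef tsid = tsidValues.lookupOrd(tsidValues.ordValue());
        long timestamp = timestampValues.nextValue();
        byte[] tsidBytes = Arrays.copyOfRange(tsid.bytes, tsid.offset, tsid.offset + tsid.length);
        // Toy encoding only; the real _id additionally hashes the routing fields.
        return Base64.getUrlEncoder().withoutPadding().encodeToString(tsidBytes) + "_" + timestamp;
    }
}
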
Martijn van Groningen, 2 years ago
parent commit 0ba4e75a9c

+ 5 - 0
docs/changelog/97409.yaml

@@ -0,0 +1,5 @@
+pr: 97409
+summary: Trim stored fields for `_id` field in tsdb
+area: TSDB
+type: enhancement
+issues: []

+ 148 - 1
modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java

@@ -8,25 +8,36 @@
 package org.elasticsearch.datastreams;
 
 import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageAction;
+import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageRequest;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
 import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction;
 import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.cluster.metadata.ComponentTemplate;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.metadata.Template;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.FormatNames;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.query.RangeQueryBuilder;
 import org.elasticsearch.indices.InvalidIndexTemplateException;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
 import org.elasticsearch.xcontent.XContentType;
 
@@ -36,8 +47,14 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 
+import static org.elasticsearch.test.MapMatcher.assertMap;
+import static org.elasticsearch.test.MapMatcher.matchesMap;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
 
 public class TSDBIndexingIT extends ESSingleNodeTestCase {
 
@@ -76,7 +93,7 @@ public class TSDBIndexingIT extends ESSingleNodeTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
-        return List.of(DataStreamsPlugin.class);
+        return List.of(DataStreamsPlugin.class, InternalSettingsPlugin.class);
     }
 
     @Override
@@ -436,6 +453,136 @@ public class TSDBIndexingIT extends ESSingleNodeTestCase {
         }
     }
 
+    public void testTrimId() throws Exception {
+        String dataStreamName = "k8s";
+        var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id");
+        putTemplateRequest.indexTemplate(
+            new ComposableIndexTemplate(
+                List.of(dataStreamName + "*"),
+                new Template(
+                    Settings.builder()
+                        .put("index.mode", "time_series")
+                        .put("index.number_of_replicas", 0)
+                        // Reduce the sync interval to speed up this integration test,
+                        // otherwise by default it takes 30 seconds before the minimum retained seqno is updated:
+                        .put("index.soft_deletes.retention_lease.sync_interval", "100ms")
+                        .build(),
+                    new CompressedXContent(MAPPING_TEMPLATE),
+                    null
+                ),
+                null,
+                null,
+                null,
+                null,
+                new ComposableIndexTemplate.DataStreamTemplate(false, false),
+                null
+            )
+        );
+        client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet();
+
+        // index some data
+        int numBulkRequests = 32;
+        int numDocsPerBulk = 256;
+        String indexName = null;
+        {
+            Instant time = Instant.now();
+            for (int i = 0; i < numBulkRequests; i++) {
+                BulkRequest bulkRequest = new BulkRequest(dataStreamName);
+                for (int j = 0; j < numDocsPerBulk; j++) {
+                    var indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE);
+                    indexRequest.source(DOC.replace("$time", formatInstant(time)), XContentType.JSON);
+                    bulkRequest.add(indexRequest);
+                    time = time.plusMillis(1);
+                }
+                var bulkResponse = client().bulk(bulkRequest).actionGet();
+                assertThat(bulkResponse.hasFailures(), is(false));
+                indexName = bulkResponse.getItems()[0].getIndex();
+            }
+            client().admin().indices().refresh(new RefreshRequest(dataStreamName)).actionGet();
+        }
+
+        // Check whether there are multiple segments:
+        var getSegmentsResponse = client().admin().indices().segments(new IndicesSegmentsRequest(dataStreamName)).actionGet();
+        assertThat(
+            getSegmentsResponse.getIndices().get(indexName).getShards().get(0).shards()[0].getSegments(),
+            hasSize(greaterThanOrEqualTo(2))
+        );
+
+        // Pre-check whether the _id stored field uses disk space:
+        var diskUsageResponse = client().execute(
+            AnalyzeIndexDiskUsageAction.INSTANCE,
+            new AnalyzeIndexDiskUsageRequest(new String[] { dataStreamName }, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true)
+        ).actionGet();
+        var map = XContentHelper.convertToMap(XContentType.JSON.xContent(), Strings.toString(diskUsageResponse), false);
+        assertMap(
+            map,
+            matchesMap().extraOk()
+                .entry(
+                    indexName,
+                    matchesMap().extraOk()
+                        .entry(
+                            "fields",
+                            matchesMap().extraOk()
+                                .entry("_id", matchesMap().extraOk().entry("stored_fields_in_bytes", greaterThanOrEqualTo(1)))
+                        )
+                )
+        );
+
+        // Check that the minimum retaining seqno has advanced, otherwise _id (and recovery source) doesn't get trimmed away.
+        var finalIndexName = indexName;
+        assertBusy(() -> {
+            var r = client().admin().indices().stats(new IndicesStatsRequest().indices(dataStreamName).all()).actionGet();
+            var retentionLeasesStats = r.getIndices().get(finalIndexName).getIndexShards().get(0).getShards()[0].getRetentionLeaseStats();
+            assertThat(retentionLeasesStats.retentionLeases().leases(), hasSize(1));
+            assertThat(
+                retentionLeasesStats.retentionLeases().leases().iterator().next().retainingSequenceNumber(),
+                equalTo((long) numBulkRequests * numDocsPerBulk)
+            );
+        });
+
+        // Force merge should trim the _id stored field away for all segments:
+        var forceMergeResponse = client().admin().indices().forceMerge(new ForceMergeRequest(dataStreamName).maxNumSegments(1)).actionGet();
+        assertThat(forceMergeResponse.getTotalShards(), equalTo(1));
+        assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(1));
+        assertThat(forceMergeResponse.getFailedShards(), equalTo(0));
+
+        // Check whether we really end up with 1 segment:
+        getSegmentsResponse = client().admin().indices().segments(new IndicesSegmentsRequest(dataStreamName)).actionGet();
+        assertThat(getSegmentsResponse.getIndices().get(indexName).getShards().get(0).shards()[0].getSegments(), hasSize(1));
+
+        // Check the _id stored field uses no disk space:
+        diskUsageResponse = client().execute(
+            AnalyzeIndexDiskUsageAction.INSTANCE,
+            new AnalyzeIndexDiskUsageRequest(new String[] { dataStreamName }, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true)
+        ).actionGet();
+        map = XContentHelper.convertToMap(XContentType.JSON.xContent(), Strings.toString(diskUsageResponse), false);
+        assertMap(
+            map,
+            matchesMap().extraOk()
+                .entry(
+                    indexName,
+                    matchesMap().extraOk()
+                        .entry(
+                            "fields",
+                            matchesMap().extraOk().entry("_id", matchesMap().extraOk().entry("stored_fields_in_bytes", equalTo(0)))
+                        )
+                )
+        );
+
+        // Check that the search API can synthesize the _id:
+        var searchRequest = new SearchRequest(dataStreamName);
+        searchRequest.source().trackTotalHits(true);
+        var searchResponse = client().search(searchRequest).actionGet();
+        assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numBulkRequests * numDocsPerBulk));
+        String id = searchResponse.getHits().getHits()[0].getId();
+        assertThat(id, notNullValue());
+
+        // Check that the _id is gettable:
+        var getResponse = client().get(new GetRequest(indexName).id(id)).actionGet();
+        assertThat(getResponse.isExists(), is(true));
+        assertThat(getResponse.getId(), equalTo(id));
+    }
+
     static String formatInstant(Instant instant) {
         return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant);
     }

+ 1 - 0
server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java

@@ -2608,6 +2608,7 @@ public class InternalEngine extends Engine {
         iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
         mergePolicy = new RecoverySourcePruneMergePolicy(
             SourceFieldMapper.RECOVERY_SOURCE_NAME,
+            engineConfig.getIndexSettings().getMode() == IndexMode.TIME_SERIES,
             softDeletesPolicy::getRetentionQuery,
             new SoftDeletesRetentionMergePolicy(
                 Lucene.SOFT_DELETES_FIELD,

+ 44 - 12
server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java

@@ -31,6 +31,7 @@ import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.BitSetIterator;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.search.internal.FilterStoredFieldVisitor;
 
 import java.io.IOException;
@@ -39,18 +40,27 @@ import java.util.Objects;
 import java.util.function.Supplier;
 
 final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
-    RecoverySourcePruneMergePolicy(String recoverySourceField, Supplier<Query> retainSourceQuerySupplier, MergePolicy in) {
+    RecoverySourcePruneMergePolicy(
+        String recoverySourceField,
+        boolean pruneIdField,
+        Supplier<Query> retainSourceQuerySupplier,
+        MergePolicy in
+    ) {
         super(in, toWrap -> new OneMerge(toWrap.segments) {
             @Override
             public CodecReader wrapForMerge(CodecReader reader) throws IOException {
                 CodecReader wrapped = toWrap.wrapForMerge(reader);
-                return wrapReader(recoverySourceField, wrapped, retainSourceQuerySupplier);
+                return wrapReader(recoverySourceField, pruneIdField, wrapped, retainSourceQuerySupplier);
             }
         });
     }
 
-    private static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier<Query> retainSourceQuerySupplier)
-        throws IOException {
+    private static CodecReader wrapReader(
+        String recoverySourceField,
+        boolean pruneIdField,
+        CodecReader reader,
+        Supplier<Query> retainSourceQuerySupplier
+    ) throws IOException {
         NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField);
         if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
            return reader; // early terminate - nothing to do here since none of the docs have a recovery source anymore.
@@ -66,20 +76,22 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
             if (recoverySourceToKeep.cardinality() == reader.maxDoc()) {
                 return reader; // keep all source
             }
-            return new SourcePruningFilterCodecReader(recoverySourceField, reader, recoverySourceToKeep);
+            return new SourcePruningFilterCodecReader(recoverySourceField, pruneIdField, reader, recoverySourceToKeep);
         } else {
-            return new SourcePruningFilterCodecReader(recoverySourceField, reader, null);
+            return new SourcePruningFilterCodecReader(recoverySourceField, pruneIdField, reader, null);
         }
     }
 
     private static class SourcePruningFilterCodecReader extends FilterCodecReader {
         private final BitSet recoverySourceToKeep;
         private final String recoverySourceField;
+        private final boolean pruneIdField;
 
-        SourcePruningFilterCodecReader(String recoverySourceField, CodecReader reader, BitSet recoverySourceToKeep) {
+        SourcePruningFilterCodecReader(String recoverySourceField, boolean pruneIdField, CodecReader reader, BitSet recoverySourceToKeep) {
             super(reader);
             this.recoverySourceField = recoverySourceField;
             this.recoverySourceToKeep = recoverySourceToKeep;
+            this.pruneIdField = pruneIdField;
         }
 
         @Override
@@ -125,7 +137,12 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
 
         @Override
         public StoredFieldsReader getFieldsReader() {
-            return new RecoverySourcePruningStoredFieldsReader(super.getFieldsReader(), recoverySourceToKeep, recoverySourceField);
+            return new RecoverySourcePruningStoredFieldsReader(
+                super.getFieldsReader(),
+                recoverySourceToKeep,
+                recoverySourceField,
+                pruneIdField
+            );
         }
 
         @Override
@@ -212,11 +229,18 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
 
             private final BitSet recoverySourceToKeep;
             private final String recoverySourceField;
-
-            RecoverySourcePruningStoredFieldsReader(StoredFieldsReader in, BitSet recoverySourceToKeep, String recoverySourceField) {
+            private final boolean pruneIdField;
+
+            RecoverySourcePruningStoredFieldsReader(
+                StoredFieldsReader in,
+                BitSet recoverySourceToKeep,
+                String recoverySourceField,
+                boolean pruneIdField
+            ) {
                 super(in);
                 this.recoverySourceToKeep = recoverySourceToKeep;
                 this.recoverySourceField = Objects.requireNonNull(recoverySourceField);
+                this.pruneIdField = pruneIdField;
             }
 
             @Override
@@ -230,6 +254,9 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
                             if (recoverySourceField.equals(fieldInfo.name)) {
                                 return Status.NO;
                             }
+                            if (pruneIdField && IdFieldMapper.NAME.equals(fieldInfo.name)) {
+                                return Status.NO;
+                            }
                             return super.needsField(fieldInfo);
                         }
                     });
@@ -238,12 +265,17 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
 
             @Override
             public StoredFieldsReader getMergeInstance() {
-                return new RecoverySourcePruningStoredFieldsReader(in.getMergeInstance(), recoverySourceToKeep, recoverySourceField);
+                return new RecoverySourcePruningStoredFieldsReader(
+                    in.getMergeInstance(),
+                    recoverySourceToKeep,
+                    recoverySourceField,
+                    pruneIdField
+                );
             }
 
             @Override
             public StoredFieldsReader clone() {
-                return new RecoverySourcePruningStoredFieldsReader(in.clone(), recoverySourceToKeep, recoverySourceField);
+                return new RecoverySourcePruningStoredFieldsReader(in.clone(), recoverySourceToKeep, recoverySourceField, pruneIdField);
             }
 
         }

+ 158 - 0
server/src/main/java/org/elasticsearch/index/mapper/IdLoader.java

@@ -0,0 +1,158 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.routing.IndexRouting;
+import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Responsible for loading the _id from stored fields or, in the case of TSDB, synthesizing the _id from the routing, _tsid and @timestamp fields.
+ */
+public sealed interface IdLoader permits IdLoader.TsIdLoader, IdLoader.StoredIdLoader {
+
+    /**
+     * @return an {@link IdLoader} instance that loads the _id from stored fields.
+     */
+    static IdLoader fromLeafStoredFieldLoader() {
+        return new StoredIdLoader();
+    }
+
+    /**
+     * @return an {@link IdLoader} instance that synthesizes the _id from the routing, _tsid and @timestamp fields.
+     */
+    static IdLoader createTsIdLoader(IndexRouting.ExtractFromSource indexRouting, List<String> routingPaths) {
+        return new TsIdLoader(indexRouting, routingPaths);
+    }
+
+    Leaf leaf(LeafStoredFieldLoader loader, LeafReader reader, int[] docIdsInLeaf) throws IOException;
+
+    /**
+     * Returns a leaf instance for a leaf reader that returns the _id for segment level doc ids.
+     */
+    sealed interface Leaf permits StoredLeaf, TsIdLeaf {
+
+        /**
+         * @param subDocId The segment level doc id for which to return the _id
+         * @return the _id for the provided subDocId
+         */
+        String getId(int subDocId);
+
+    }
+
+    final class TsIdLoader implements IdLoader {
+
+        private final IndexRouting.ExtractFromSource indexRouting;
+        private final List<String> routingPaths;
+
+        TsIdLoader(IndexRouting.ExtractFromSource indexRouting, List<String> routingPaths) {
+            this.routingPaths = routingPaths;
+            this.indexRouting = indexRouting;
+        }
+
+        public IdLoader.Leaf leaf(LeafStoredFieldLoader loader, LeafReader reader, int[] docIdsInLeaf) throws IOException {
+            IndexRouting.ExtractFromSource.Builder[] builders = new IndexRouting.ExtractFromSource.Builder[docIdsInLeaf.length];
+            for (int i = 0; i < builders.length; i++) {
+                builders[i] = indexRouting.builder();
+            }
+
+            for (String routingField : routingPaths) {
+                // Routing fields must always be keyword fields, so it is ok to use SortedSetDocValues directly here.
+                SortedSetDocValues dv = DocValues.getSortedSet(reader, routingField);
+                for (int i = 0; i < docIdsInLeaf.length; i++) {
+                    int docId = docIdsInLeaf[i];
+                    var builder = builders[i];
+                    if (dv.advanceExact(docId)) {
+                        for (int j = 0; j < dv.docValueCount(); j++) {
+                            BytesRef routingValue = dv.lookupOrd(dv.nextOrd());
+                            builder.addMatching(routingField, routingValue);
+                        }
+                    }
+                }
+            }
+
+            String[] ids = new String[docIdsInLeaf.length];
+            // Each document always has exactly one tsid and one timestamp:
+            SortedDocValues tsIdDocValues = DocValues.getSorted(reader, TimeSeriesIdFieldMapper.NAME);
+            SortedNumericDocValues timestampDocValues = DocValues.getSortedNumeric(reader, DataStream.TIMESTAMP_FIELD_NAME);
+            for (int i = 0; i < docIdsInLeaf.length; i++) {
+                int docId = docIdsInLeaf[i];
+
+                boolean found = tsIdDocValues.advanceExact(docId);
+                assert found;
+                BytesRef tsid = tsIdDocValues.lookupOrd(tsIdDocValues.ordValue());
+                found = timestampDocValues.advanceExact(docId);
+                assert found;
+                assert timestampDocValues.docValueCount() == 1;
+                long timestamp = timestampDocValues.nextValue();
+
+                var routingBuilder = builders[i];
+                ids[i] = TsidExtractingIdFieldMapper.createId(false, routingBuilder, tsid, timestamp, new byte[16]);
+            }
+            return new TsIdLeaf(docIdsInLeaf, ids);
+        }
+    }
+
+    final class StoredIdLoader implements IdLoader {
+
+        public StoredIdLoader() {}
+
+        @Override
+        public Leaf leaf(LeafStoredFieldLoader loader, LeafReader reader, int[] docIdsInLeaf) throws IOException {
+            return new StoredLeaf(loader);
+        }
+    }
+
+    final class TsIdLeaf implements Leaf {
+
+        private final String[] ids;
+        private final int[] docIdsInLeaf;
+
+        private int idx = -1;
+
+        TsIdLeaf(int[] docIdsInLeaf, String[] ids) {
+            this.ids = ids;
+            this.docIdsInLeaf = docIdsInLeaf;
+        }
+
+        public String getId(int subDocId) {
+            idx++;
+            if (docIdsInLeaf[idx] != subDocId) {
+                throw new IllegalArgumentException(
+                    "expected to be called with [" + docIdsInLeaf[idx] + "] but was called with " + subDocId + " instead"
+                );
+            }
+            return ids[idx];
+        }
+    }
+
+    final class StoredLeaf implements Leaf {
+
+        private final LeafStoredFieldLoader loader;
+
+        StoredLeaf(LeafStoredFieldLoader loader) {
+            this.loader = loader;
+        }
+
+        @Override
+        public String getId(int subDocId) {
+            return loader.id();
+        }
+    }
+
+}

+ 13 - 0
server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java

@@ -17,14 +17,17 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TotalHits;
 import org.elasticsearch.action.search.SearchShardTask;
 import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.routing.IndexRouting;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.NestedLookup;
 import org.elasticsearch.index.mapper.SourceLoader;
 import org.elasticsearch.index.query.AbstractQueryBuilder;
@@ -815,4 +818,14 @@ final class DefaultSearchContext extends SearchContext {
     public SourceLoader newSourceLoader() {
         return searchExecutionContext.newSourceLoader(request.isForceSyntheticSource());
     }
+
+    @Override
+    public IdLoader newIdLoader() {
+        if (indexService.getIndexSettings().getMode() == IndexMode.TIME_SERIES) {
+            var indexRouting = (IndexRouting.ExtractFromSource) indexService.getIndexSettings().getIndexRouting();
+            return IdLoader.createTsIdLoader(indexRouting, indexService.getMetadata().getRoutingPaths());
+        } else {
+            return IdLoader.fromLeafStoredFieldLoader();
+        }
+    }
 }

+ 22 - 6
server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java

@@ -14,6 +14,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.TotalHits;
 import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader;
 import org.elasticsearch.index.fieldvisitor.StoredFieldLoader;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.SourceLoader;
 import org.elasticsearch.search.LeafNestedDocuments;
 import org.elasticsearch.search.NestedDocuments;
@@ -110,6 +111,7 @@ public class FetchPhase {
         storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(false, false, sourceLoader.requiredStoredFields()));
 
         StoredFieldLoader storedFieldLoader = profiler.storedFields(StoredFieldLoader.fromSpec(storedFieldsSpec));
+        IdLoader idLoader = context.newIdLoader();
         boolean requiresSource = storedFieldsSpec.requiresSource();
 
         NestedDocuments nestedDocuments = context.getSearchExecutionContext().getNestedDocuments();
@@ -120,6 +122,7 @@ public class FetchPhase {
             LeafNestedDocuments leafNestedDocuments;
             LeafStoredFieldLoader leafStoredFieldLoader;
             SourceLoader.Leaf leafSourceLoader;
+            IdLoader.Leaf leafIdLoader;
 
             @Override
             protected void setNextReader(LeafReaderContext ctx, int[] docsInLeaf) throws IOException {
@@ -128,6 +131,7 @@ public class FetchPhase {
                 this.leafNestedDocuments = nestedDocuments.getLeafNestedDocuments(ctx);
                 this.leafStoredFieldLoader = storedFieldLoader.getLoader(ctx, docsInLeaf);
                 this.leafSourceLoader = sourceLoader.leaf(ctx.reader(), docsInLeaf);
+                this.leafIdLoader = idLoader.leaf(leafStoredFieldLoader, ctx.reader(), docsInLeaf);
                 fieldLookupProvider.setNextReader(ctx);
                 for (FetchSubPhaseProcessor processor : processors) {
                     processor.setNextReader(ctx);
@@ -150,7 +154,8 @@ public class FetchPhase {
                     leafStoredFieldLoader,
                     doc,
                     ctx,
-                    leafSourceLoader
+                    leafSourceLoader,
+                    leafIdLoader
                 );
                 sourceProvider.source = hit.source();
                 fieldLookupProvider.storedFields = hit.loadedFields();
@@ -194,10 +199,19 @@ public class FetchPhase {
         LeafStoredFieldLoader leafStoredFieldLoader,
         int docId,
         LeafReaderContext subReaderContext,
-        SourceLoader.Leaf sourceLoader
+        SourceLoader.Leaf sourceLoader,
+        IdLoader.Leaf idLoader
     ) throws IOException {
         if (nestedDocuments.advance(docId - subReaderContext.docBase) == null) {
-            return prepareNonNestedHitContext(requiresSource, profiler, leafStoredFieldLoader, docId, subReaderContext, sourceLoader);
+            return prepareNonNestedHitContext(
+                requiresSource,
+                profiler,
+                leafStoredFieldLoader,
+                docId,
+                subReaderContext,
+                sourceLoader,
+                idLoader
+            );
         } else {
             return prepareNestedHitContext(
                 context,
@@ -224,18 +238,20 @@ public class FetchPhase {
         LeafStoredFieldLoader leafStoredFieldLoader,
         int docId,
         LeafReaderContext subReaderContext,
-        SourceLoader.Leaf sourceLoader
+        SourceLoader.Leaf sourceLoader,
+        IdLoader.Leaf idLoader
     ) throws IOException {
         int subDocId = docId - subReaderContext.docBase;
 
         leafStoredFieldLoader.advanceTo(subDocId);
 
-        if (leafStoredFieldLoader.id() == null) {
+        String id = idLoader.getId(subDocId);
+        if (id == null) {
             SearchHit hit = new SearchHit(docId, null);
             Source source = Source.lazy(lazyStoredSourceLoader(profiler, subReaderContext, subDocId));
             return new HitContext(hit, subReaderContext, subDocId, Map.of(), source);
         } else {
-            SearchHit hit = new SearchHit(docId, leafStoredFieldLoader.id());
+            SearchHit hit = new SearchHit(docId, id);
             Source source;
             if (requiresSource) {
                 Timer timer = profiler.startLoadingSource();

+ 6 - 0
server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java

@@ -15,6 +15,7 @@ import org.elasticsearch.action.search.SearchShardTask;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.SourceLoader;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.SearchExecutionContext;
@@ -482,4 +483,9 @@ public abstract class FilteredSearchContext extends SearchContext {
     public SourceLoader newSourceLoader() {
         return in.newSourceLoader();
     }
+
+    @Override
+    public IdLoader newIdLoader() {
+        return in.newIdLoader();
+    }
 }

+ 3 - 0
server/src/main/java/org/elasticsearch/search/internal/SearchContext.java

@@ -17,6 +17,7 @@ import org.elasticsearch.core.Releasable;
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.SourceLoader;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardException;
@@ -398,4 +399,6 @@ public abstract class SearchContext implements Releasable {
      * Build something to load source {@code _source}.
      */
     public abstract SourceLoader newSourceLoader();
+
+    public abstract IdLoader newIdLoader();
 }

+ 6 - 0
server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java

@@ -15,6 +15,7 @@ import org.elasticsearch.action.search.SearchShardTask;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.SourceLoader;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.SearchExecutionContext;
@@ -548,4 +549,9 @@ public class RankSearchContext extends SearchContext {
     public SourceLoader newSourceLoader() {
         throw new UnsupportedOperationException();
     }
+
+    @Override
+    public IdLoader newIdLoader() {
+        throw new UnsupportedOperationException();
+    }
 }

+ 24 - 6
server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java

@@ -32,6 +32,7 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.util.NullInfoStream;
 import org.apache.lucene.util.InfoStream;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
@@ -43,9 +44,11 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
 
     public void testPruneAll() throws IOException {
         try (Directory dir = newDirectory()) {
+            boolean pruneIdField = randomBoolean();
             IndexWriterConfig iwc = newIndexWriterConfig();
             RecoverySourcePruneMergePolicy mp = new RecoverySourcePruneMergePolicy(
                 "extra_source",
+                pruneIdField,
                 MatchNoDocsQuery::new,
                 newLogMergePolicy()
             );
@@ -56,6 +59,7 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
                         writer.flush();
                     }
                     Document doc = new Document();
+                    doc.add(new StoredField(IdFieldMapper.NAME, "_id"));
                     doc.add(new StoredField("source", "hello world"));
                     doc.add(new StoredField("extra_source", "hello world"));
                     doc.add(new NumericDocValuesField("extra_source", 1));
@@ -66,8 +70,14 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
                 try (DirectoryReader reader = DirectoryReader.open(writer)) {
                     for (int i = 0; i < reader.maxDoc(); i++) {
                         Document document = reader.document(i);
-                        assertEquals(1, document.getFields().size());
-                        assertEquals("source", document.getFields().get(0).name());
+                        if (pruneIdField) {
+                            assertEquals(1, document.getFields().size());
+                            assertEquals("source", document.getFields().get(0).name());
+                        } else {
+                            assertEquals(2, document.getFields().size());
+                            assertEquals(IdFieldMapper.NAME, document.getFields().get(0).name());
+                            assertEquals("source", document.getFields().get(1).name());
+                        }
                     }
                     assertEquals(1, reader.leaves().size());
                     LeafReader leafReader = reader.leaves().get(0).reader();
@@ -111,9 +121,15 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
 
     public void testPruneSome() throws IOException {
         try (Directory dir = newDirectory()) {
+            boolean pruneIdField = randomBoolean();
             IndexWriterConfig iwc = newIndexWriterConfig();
             iwc.setMergePolicy(
-                new RecoverySourcePruneMergePolicy("extra_source", () -> new TermQuery(new Term("even", "true")), iwc.getMergePolicy())
+                new RecoverySourcePruneMergePolicy(
+                    "extra_source",
+                    pruneIdField,
+                    () -> new TermQuery(new Term("even", "true")),
+                    iwc.getMergePolicy()
+                )
             );
             try (IndexWriter writer = new IndexWriter(dir, iwc)) {
                 for (int i = 0; i < 20; i++) {
@@ -121,6 +137,7 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
                         writer.flush();
                     }
                     Document doc = new Document();
+                    doc.add(new StoredField(IdFieldMapper.NAME, "_id"));
                     doc.add(new StringField("even", Boolean.toString(i % 2 == 0), Field.Store.YES));
                     doc.add(new StoredField("source", "hello world"));
                     doc.add(new StoredField("extra_source", "hello world"));
@@ -138,12 +155,13 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
                         Set<String> collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet());
                         assertTrue(collect.contains("source"));
                         assertTrue(collect.contains("even"));
-                        if (collect.size() == 3) {
+                        if (collect.size() == 4) {
                             assertTrue(collect.contains("extra_source"));
+                            assertTrue(collect.contains(IdFieldMapper.NAME));
                             assertEquals("true", document.getField("even").stringValue());
                             assertEquals(i, extra_source.nextDoc());
                         } else {
-                            assertEquals(2, document.getFields().size());
+                            assertEquals(pruneIdField ? 2 : 3, document.getFields().size());
                         }
                     }
                     assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
@@ -155,7 +173,7 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
     public void testPruneNone() throws IOException {
         try (Directory dir = newDirectory()) {
             IndexWriterConfig iwc = newIndexWriterConfig();
-            iwc.setMergePolicy(new RecoverySourcePruneMergePolicy("extra_source", () -> new MatchAllDocsQuery(), iwc.getMergePolicy()));
+            iwc.setMergePolicy(new RecoverySourcePruneMergePolicy("extra_source", false, MatchAllDocsQuery::new, iwc.getMergePolicy()));
             try (IndexWriter writer = new IndexWriter(dir, iwc)) {
                 for (int i = 0; i < 20; i++) {
                     if (i > 0 && randomBoolean()) {

+ 272 - 0
server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java

@@ -0,0 +1,272 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.SortedNumericSortField;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.analysis.MockAnalyzer;
+import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.routing.IndexRouting;
+import org.elasticsearch.core.CheckedConsumer;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.IntStream;
+
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+public class IdLoaderTests extends ESTestCase {
+
+    public void testSynthesizeIdSimple() throws Exception {
+        var routingPaths = List.of("dim1");
+        var routing = createRouting(routingPaths);
+        var idLoader = IdLoader.createTsIdLoader(routing, routingPaths);
+
+        long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z");
+        List<Doc> docs = List.of(
+            new Doc(startTime, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "xxx"))),
+            new Doc(startTime + 1, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "yyy"))),
+            new Doc(startTime + 2, List.of(new Dimension("dim1", "bbb"), new Dimension("dim2", "xxx")))
+        );
+        CheckedConsumer<IndexReader, IOException> verify = indexReader -> {
+            assertThat(indexReader.leaves(), hasSize(1));
+            LeafReader leafReader = indexReader.leaves().get(0).reader();
+            assertThat(leafReader.numDocs(), equalTo(3));
+            var leaf = idLoader.leaf(null, leafReader, new int[] { 0, 1, 2 });
+            assertThat(leaf.getId(0), equalTo(expectedId(routing, docs.get(0))));
+            assertThat(leaf.getId(1), equalTo(expectedId(routing, docs.get(1))));
+            assertThat(leaf.getId(2), equalTo(expectedId(routing, docs.get(2))));
+        };
+        prepareIndexReader(indexAndForceMerge(routing, docs), verify);
+    }
+
+    public void testSynthesizeIdMultipleSegments() throws Exception {
+        var routingPaths = List.of("dim1");
+        var routing = createRouting(routingPaths);
+        var idLoader = IdLoader.createTsIdLoader(routing, routingPaths);
+
+        long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z");
+        List<Doc> docs1 = List.of(
+            new Doc(startTime, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "xxx"))),
+            new Doc(startTime - 1, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "xxx"))),
+            new Doc(startTime - 2, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "xxx"))),
+            new Doc(startTime - 3, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "xxx")))
+        );
+        List<Doc> docs2 = List.of(
+            new Doc(startTime, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "yyy"))),
+            new Doc(startTime - 1, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "yyy"))),
+            new Doc(startTime - 2, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "yyy"))),
+            new Doc(startTime - 3, List.of(new Dimension("dim1", "aaa"), new Dimension("dim2", "yyy")))
+        );
+        List<Doc> docs3 = List.of(
+            new Doc(startTime, List.of(new Dimension("dim1", "bbb"), new Dimension("dim2", "yyy"))),
+            new Doc(startTime - 1, List.of(new Dimension("dim1", "bbb"), new Dimension("dim2", "yyy"))),
+            new Doc(startTime - 2, List.of(new Dimension("dim1", "bbb"), new Dimension("dim2", "yyy"))),
+            new Doc(startTime - 3, List.of(new Dimension("dim1", "bbb"), new Dimension("dim2", "yyy")))
+        );
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex = writer -> {
+            for (Doc doc : docs1) {
+                indexDoc(routing, writer, doc);
+            }
+            writer.flush();
+            for (Doc doc : docs2) {
+                indexDoc(routing, writer, doc);
+            }
+            writer.flush();
+            for (Doc doc : docs3) {
+                indexDoc(routing, writer, doc);
+            }
+            writer.flush();
+        };
+        CheckedConsumer<IndexReader, IOException> verify = indexReader -> {
+            assertThat(indexReader.leaves(), hasSize(3));
+            {
+                LeafReader leafReader = indexReader.leaves().get(0).reader();
+                assertThat(leafReader.numDocs(), equalTo(docs1.size()));
+                var leaf = idLoader.leaf(null, leafReader, IntStream.range(0, docs1.size()).toArray());
+                for (int i = 0; i < docs1.size(); i++) {
+                    assertThat(leaf.getId(i), equalTo(expectedId(routing, docs1.get(i))));
+                }
+            }
+            {
+                LeafReader leafReader = indexReader.leaves().get(1).reader();
+                assertThat(leafReader.numDocs(), equalTo(docs2.size()));
+                var leaf = idLoader.leaf(null, leafReader, new int[] { 0, 3 });
+                assertThat(leaf.getId(0), equalTo(expectedId(routing, docs2.get(0))));
+                assertThat(leaf.getId(3), equalTo(expectedId(routing, docs2.get(3))));
+            }
+            {
+                LeafReader leafReader = indexReader.leaves().get(2).reader();
+                assertThat(leafReader.numDocs(), equalTo(docs3.size()));
+                var leaf = idLoader.leaf(null, leafReader, new int[] { 1, 2 });
+                assertThat(leaf.getId(1), equalTo(expectedId(routing, docs3.get(1))));
+                assertThat(leaf.getId(2), equalTo(expectedId(routing, docs3.get(2))));
+            }
+            {
+                LeafReader leafReader = indexReader.leaves().get(2).reader();
+                assertThat(leafReader.numDocs(), equalTo(docs3.size()));
+                var leaf = idLoader.leaf(null, leafReader, new int[] { 3 });
+                expectThrows(IllegalArgumentException.class, () -> leaf.getId(0));
+            }
+        };
+        prepareIndexReader(buildIndex, verify);
+    }
+
+    public void testSynthesizeIdRandom() throws Exception {
+        var routingPaths = List.of("dim1");
+        var routing = createRouting(routingPaths);
+        var idLoader = IdLoader.createTsIdLoader(routing, routingPaths);
+
+        long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z");
+        Set<String> expectedIDs = new HashSet<>();
+        List<Doc> randomDocs = new ArrayList<>();
+        int numberOfTimeSeries = randomIntBetween(8, 64);
+        for (int i = 0; i < numberOfTimeSeries; i++) {
+            int numberOfDimensions = randomIntBetween(1, 6);
+            List<Dimension> dimensions = new ArrayList<>(numberOfDimensions);
+            for (int j = 1; j <= numberOfDimensions; j++) {
+                String fieldName = "dim" + j;
+                Object value;
+                if (j == 5) {
+                    value = randomLongBetween(1, 20);
+                } else {
+                    value = randomAlphaOfLength(4);
+                }
+                dimensions.add(new Dimension(fieldName, value));
+            }
+            int numberOfSamples = randomIntBetween(1, 16);
+            for (int j = 0; j < numberOfSamples; j++) {
+                Doc doc = new Doc(startTime++, dimensions);
+                randomDocs.add(doc);
+                expectedIDs.add(expectedId(routing, doc));
+            }
+        }
+        CheckedConsumer<IndexReader, IOException> verify = indexReader -> {
+            assertThat(indexReader.leaves(), hasSize(1));
+            LeafReader leafReader = indexReader.leaves().get(0).reader();
+            assertThat(leafReader.numDocs(), equalTo(randomDocs.size()));
+            var leaf = idLoader.leaf(null, leafReader, IntStream.range(0, randomDocs.size()).toArray());
+            for (int i = 0; i < randomDocs.size(); i++) {
+                String actualId = leaf.getId(i);
+                assertTrue("docId=" + i + " id=" + actualId, expectedIDs.remove(actualId));
+            }
+        };
+        prepareIndexReader(indexAndForceMerge(routing, randomDocs), verify);
+        assertThat(expectedIDs, empty());
+    }
+
+    private static CheckedConsumer<RandomIndexWriter, IOException> indexAndForceMerge(
+        IndexRouting.ExtractFromSource routing,
+        List<Doc> docs
+    ) {
+        return writer -> {
+            for (Doc doc : docs) {
+                indexDoc(routing, writer, doc);
+            }
+            writer.forceMerge(1);
+        };
+    }
+
+    private void prepareIndexReader(
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
+        CheckedConsumer<IndexReader, IOException> verify
+    ) throws IOException {
+        try (Directory directory = newDirectory()) {
+            IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random()));
+            Sort sort = new Sort(
+                new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false),
+                new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true)
+            );
+            config.setIndexSort(sort);
+            RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config);
+            buildIndex.accept(indexWriter);
+            indexWriter.close();
+
+            try (DirectoryReader indexReader = DirectoryReader.open(directory);) {
+                verify.accept(indexReader);
+            }
+        }
+    }
+
+    private static void indexDoc(IndexRouting.ExtractFromSource routing, RandomIndexWriter iw, Doc doc) throws IOException {
+        final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routing.builder());
+
+        final List<IndexableField> fields = new ArrayList<>();
+        fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp));
+        fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp));
+        for (Dimension dimension : doc.dimensions) {
+            if (dimension.value instanceof Number n) {
+                builder.addLong(dimension.field, n.longValue());
+                fields.add(new SortedNumericDocValuesField(dimension.field, ((Number) dimension.value).longValue()));
+            } else {
+                builder.addString(dimension.field, dimension.value.toString());
+                fields.add(new SortedSetDocValuesField(dimension.field, new BytesRef(dimension.value.toString())));
+            }
+        }
+        BytesRef tsid = builder.build().toBytesRef();
+        fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, tsid));
+        iw.addDocument(fields);
+    }
+
+    private static String expectedId(IndexRouting.ExtractFromSource routing, Doc doc) throws IOException {
+        var routingBuilder = routing.builder();
+        var timeSeriesIdBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routingBuilder);
+        for (Dimension dimension : doc.dimensions) {
+            if (dimension.value instanceof Number n) {
+                timeSeriesIdBuilder.addLong(dimension.field, n.longValue());
+            } else {
+                timeSeriesIdBuilder.addString(dimension.field, dimension.value.toString());
+            }
+        }
+        return TsidExtractingIdFieldMapper.createId(
+            false,
+            routingBuilder,
+            timeSeriesIdBuilder.build().toBytesRef(),
+            doc.timestamp,
+            new byte[16]
+        );
+    }
+
+    private static IndexRouting.ExtractFromSource createRouting(List<String> routingPaths) {
+        var settings = indexSettings(IndexVersion.current(), 2, 1).put(IndexSettings.MODE.getKey(), "time_series")
+            .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-01T00:00:00.000Z")
+            .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2001-01-01T00:00:00.000Z")
+            .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPaths)
+            .build();
+        var indexMetadata = IndexMetadata.builder("index").settings(settings).build();
+        return (IndexRouting.ExtractFromSource) IndexRouting.fromIndexMetadata(indexMetadata);
+    }
+
+    record Doc(long timestamp, List<Dimension> dimensions) {}
+
+    record Dimension(String field, Object value) {}
+
+}

+ 105 - 0
server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java

@@ -26,6 +26,7 @@ import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.cache.IndexCache;
 import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.AbstractQueryBuilder;
@@ -52,6 +53,7 @@ import java.util.function.Function;
 import java.util.function.Supplier;
 
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -409,6 +411,109 @@ public class DefaultSearchContextTests extends ESTestCase {
         }
     }
 
+    public void testNewIdLoader() throws Exception {
+        try (DefaultSearchContext context = createDefaultSearchContext(Settings.EMPTY)) {
+            assertThat(context.newIdLoader(), instanceOf(IdLoader.StoredIdLoader.class));
+            context.indexShard().getThreadPool().shutdown();
+        }
+    }
+
+    public void testNewIdLoaderWithTsdb() throws Exception {
+        Settings settings = Settings.builder()
+            .put(IndexSettings.MODE.getKey(), "time_series")
+            .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-01T00:00:00.000Z")
+            .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2001-01-01T00:00:00.000Z")
+            .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "field")
+            .build();
+        try (DefaultSearchContext context = createDefaultSearchContext(settings)) {
+            assertThat(context.newIdLoader(), instanceOf(IdLoader.TsIdLoader.class));
+            context.indexShard().getThreadPool().shutdown();
+        }
+    }
+
+    private DefaultSearchContext createDefaultSearchContext(Settings providedIndexSettings) throws IOException {
+        TimeValue timeout = new TimeValue(randomIntBetween(1, 100));
+        ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class);
+        when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT);
+        ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1);
+        when(shardSearchRequest.shardId()).thenReturn(shardId);
+        when(shardSearchRequest.shardRequestIndex()).thenReturn(shardId.id());
+        when(shardSearchRequest.numberOfShards()).thenReturn(2);
+
+        ThreadPool threadPool = new TestThreadPool(this.getClass().getName());
+        IndexShard indexShard = mock(IndexShard.class);
+        when(indexShard.getThreadPool()).thenReturn(threadPool);
+
+        Settings settings = indexSettings(IndexVersion.current(), 2, 1).put(providedIndexSettings).build();
+
+        IndexService indexService = mock(IndexService.class);
+        IndexCache indexCache = mock(IndexCache.class);
+        QueryCache queryCache = mock(QueryCache.class);
+        when(indexCache.query()).thenReturn(queryCache);
+        when(indexService.cache()).thenReturn(indexCache);
+        SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
+        when(indexService.newSearchExecutionContext(eq(shardId.id()), eq(shardId.id()), any(), any(), nullable(String.class), any()))
+            .thenReturn(searchExecutionContext);
+        MapperService mapperService = mock(MapperService.class);
+        when(mapperService.hasNested()).thenReturn(randomBoolean());
+        when(indexService.mapperService()).thenReturn(mapperService);
+
+        IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build();
+        IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY);
+        when(indexService.getIndexSettings()).thenReturn(indexSettings);
+        when(indexService.getMetadata()).thenReturn(indexMetadata);
+        when(mapperService.getIndexSettings()).thenReturn(indexSettings);
+        when(searchExecutionContext.getIndexSettings()).thenReturn(indexSettings);
+        when(searchExecutionContext.indexVersionCreated()).thenReturn(indexSettings.getIndexVersionCreated());
+
+        try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            final Supplier<Engine.SearcherSupplier> searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) {
+                @Override
+                protected void doClose() {}
+
+                @Override
+                protected Engine.Searcher acquireSearcherInternal(String source) {
+                    try {
+                        IndexReader reader = w.getReader();
+                        return new Engine.Searcher(
+                            "test",
+                            reader,
+                            IndexSearcher.getDefaultSimilarity(),
+                            IndexSearcher.getDefaultQueryCache(),
+                            IndexSearcher.getDefaultQueryCachingPolicy(),
+                            reader
+                        );
+                    } catch (IOException exc) {
+                        throw new AssertionError(exc);
+                    }
+                }
+            };
+
+            SearchShardTarget target = new SearchShardTarget("node", shardId, null);
+
+            ReaderContext readerContext = new ReaderContext(
+                newContextId(),
+                indexService,
+                indexShard,
+                searcherSupplier.get(),
+                randomNonNegativeLong(),
+                false
+            );
+            return new DefaultSearchContext(
+                readerContext,
+                shardSearchRequest,
+                target,
+                null,
+                timeout,
+                null,
+                false,
+                null,
+                randomInt(),
+                randomInt()
+            );
+        }
+    }
+
     private ShardSearchContextId newContextId() {
         return new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong());
     }

+ 2 - 0
test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java

@@ -82,6 +82,7 @@ import org.elasticsearch.index.mapper.FieldAliasMapper;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.GeoPointFieldMapper;
 import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
@@ -435,6 +436,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
         when(indexShard.indexSettings()).thenReturn(indexSettings);
         when(ctx.indexShard()).thenReturn(indexShard);
         when(ctx.newSourceLoader()).thenAnswer(inv -> searchExecutionContext.newSourceLoader(false));
+        when(ctx.newIdLoader()).thenReturn(IdLoader.fromLeafStoredFieldLoader());
         return new SubSearchContext(ctx);
     }
 

+ 6 - 0
test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java

@@ -15,6 +15,7 @@ import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
+import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.SourceLoader;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.SearchExecutionContext;
@@ -565,4 +566,9 @@ public class TestSearchContext extends SearchContext {
     public SourceLoader newSourceLoader() {
         return searchExecutionContext.newSourceLoader(false);
     }
+
+    @Override
+    public IdLoader newIdLoader() {
+        throw new UnsupportedOperationException();
+    }
 }