浏览代码

[8.x] Listing all available databases in the _ingest/geoip/database API (#113498) (#114081)

* Listing all available databases in the _ingest/geoip/database API (#113498)

* yamlRestCompatTestTransform renamed to yamlRestTestV7CompatTransform
Keith Massey 1 年之前
父节点
当前提交
0b224dcef4
共有 17 个文件被更改,包括 619 次插入和 51 次删除
  1. 5 0
      docs/changelog/113498.yaml
  2. 5 0
      modules/ingest-geoip/build.gradle
  3. 34 0
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java
  4. 13 0
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
  5. 3 1
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java
  6. 3 3
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java
  7. 17 1
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java
  8. 153 11
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
  9. 6 1
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java
  10. 2 0
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java
  11. 170 23
      modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java
  12. 7 0
      modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java
  13. 7 0
      modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java
  14. 30 8
      modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java
  15. 131 0
      modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationActionTests.java
  16. 32 3
      modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml
  17. 1 0
      server/src/main/java/org/elasticsearch/TransportVersions.java

+ 5 - 0
docs/changelog/113498.yaml

@@ -0,0 +1,5 @@
+pr: 113498
+summary: Listing all available databases in the _ingest/geoip/database API
+area: Ingest Node
+type: enhancement
+issues: []

+ 5 - 0
modules/ingest-geoip/build.gradle

@@ -93,3 +93,8 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task ->
 artifacts {
   restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test"))
 }
+
+tasks.named("yamlRestTestV7CompatTransform").configure({ task ->
+  task.skipTest("ingest_geoip/40_geoip_databases/Test adding, getting, and removing geoip databases",
+    "get databases behavior began returning more results in 8.16")
+})

+ 34 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java

@@ -20,11 +20,13 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.hash.MessageDigests;
 import org.elasticsearch.common.logging.HeaderWarning;
 import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.core.CheckedRunnable;
 import org.elasticsearch.core.IOUtils;
+import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.gateway.GatewayService;
@@ -37,6 +39,7 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.watcher.ResourceWatcherService;
 
 import java.io.Closeable;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.nio.file.FileAlreadyExistsException;
@@ -51,8 +54,10 @@ import java.nio.file.attribute.BasicFileAttributes;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -541,6 +546,35 @@ public final class DatabaseNodeService implements IpDatabaseProvider {
         return configDatabases.getConfigDatabases().keySet();
     }
 
+    public Map<String, ConfigDatabaseDetail> getConfigDatabasesDetail() {
+        Map<String, ConfigDatabaseDetail> allDatabases = new HashMap<>();
+        for (Map.Entry<String, DatabaseReaderLazyLoader> entry : configDatabases.getConfigDatabases().entrySet()) {
+            DatabaseReaderLazyLoader databaseReaderLazyLoader = entry.getValue();
+            try {
+                allDatabases.put(
+                    entry.getKey(),
+                    new ConfigDatabaseDetail(
+                        entry.getKey(),
+                        databaseReaderLazyLoader.getMd5(),
+                        databaseReaderLazyLoader.getBuildDateMillis(),
+                        databaseReaderLazyLoader.getDatabaseType()
+                    )
+                );
+            } catch (FileNotFoundException e) {
+                /*
+                 * Since there is nothing to prevent a database from being deleted while this method is running, it is possible we get an
+                 * exception here because the file no longer exists. We just log it and move on -- it's preferable to synchronization.
+                 */
+                logger.trace(Strings.format("Unable to get metadata for config database %s", entry.getKey()), e);
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+        return allDatabases;
+    }
+
+    public record ConfigDatabaseDetail(String name, @Nullable String md5, @Nullable Long buildDateInMillis, @Nullable String type) {}
+
     public Set<String> getFilesInTemp() {
         try (Stream<Path> files = Files.list(geoipTmpDirectory)) {
             return files.map(Path::getFileName).map(Path::toString).collect(Collectors.toSet());

+ 13 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java

@@ -63,6 +63,7 @@ class DatabaseReaderLazyLoader implements IpDatabase {
 
     // cache the database type so that we do not re-read it on every pipeline execution
     final SetOnce<String> databaseType;
+    final SetOnce<Long> buildDate;
 
     private volatile boolean deleteDatabaseFileOnShutdown;
     private final AtomicInteger currentUsages = new AtomicInteger(0);
@@ -74,6 +75,7 @@ class DatabaseReaderLazyLoader implements IpDatabase {
         this.loader = createDatabaseLoader(databasePath);
         this.databaseReader = new SetOnce<>();
         this.databaseType = new SetOnce<>();
+        this.buildDate = new SetOnce<>();
     }
 
     /**
@@ -277,4 +279,15 @@ class DatabaseReaderLazyLoader implements IpDatabase {
             return Optional.of(builder.build(result, NetworkAddress.format(inetAddress), record.getNetwork(), List.of("en")));
         }
     }
+
+    long getBuildDateMillis() throws IOException {
+        if (buildDate.get() == null) {
+            synchronized (buildDate) {
+                if (buildDate.get() == null) {
+                    buildDate.set(loader.get().getMetadata().getBuildDate().getTime());
+                }
+            }
+        }
+        return buildDate.get();
+    }
 }

+ 3 - 1
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java

@@ -444,7 +444,9 @@ public class EnterpriseGeoIpDownloader extends AllocatedPersistentTask {
     }
 
     private ProviderDownload downloaderFor(DatabaseConfiguration database) {
-        return new MaxmindDownload(database.name(), database.maxmind());
+        assert database.provider() instanceof DatabaseConfiguration.Maxmind
+            : "Attempt to use maxmind downloader with a provider of type " + database.provider().getClass();
+        return new MaxmindDownload(database.name(), (DatabaseConfiguration.Maxmind) database.provider());
     }
 
     class MaxmindDownload implements ProviderDownload {

+ 3 - 3
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java

@@ -41,7 +41,7 @@ import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTask
 import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
 
-class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
+public class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
 
     private static boolean includeSha256(TransportVersion version) {
         return version.isPatchFrom(TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15)
@@ -150,7 +150,7 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
         });
     }
 
-    record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck, @Nullable String sha256)
+    public record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck, @Nullable String sha256)
         implements
             ToXContentObject {
 
@@ -198,7 +198,7 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
             }
         }
 
-        Metadata {
+        public Metadata {
             Objects.requireNonNull(md5);
         }
 

+ 17 - 1
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java

@@ -31,6 +31,7 @@ import org.elasticsearch.indices.SystemIndexDescriptor;
 import org.elasticsearch.ingest.EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams;
 import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.ingest.Processor;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;
 import org.elasticsearch.ingest.geoip.direct.DeleteDatabaseConfigurationAction;
 import org.elasticsearch.ingest.geoip.direct.GetDatabaseConfigurationAction;
 import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction;
@@ -232,7 +233,22 @@ public class IngestGeoIpPlugin extends Plugin
             new NamedWriteableRegistry.Entry(PersistentTaskParams.class, GEOIP_DOWNLOADER, GeoIpTaskParams::new),
             new NamedWriteableRegistry.Entry(PersistentTaskState.class, ENTERPRISE_GEOIP_DOWNLOADER, EnterpriseGeoIpTaskState::new),
             new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ENTERPRISE_GEOIP_DOWNLOADER, EnterpriseGeoIpTaskParams::new),
-            new NamedWriteableRegistry.Entry(Task.Status.class, GEOIP_DOWNLOADER, GeoIpDownloaderStats::new)
+            new NamedWriteableRegistry.Entry(Task.Status.class, GEOIP_DOWNLOADER, GeoIpDownloaderStats::new),
+            new NamedWriteableRegistry.Entry(
+                DatabaseConfiguration.Provider.class,
+                DatabaseConfiguration.Maxmind.NAME,
+                DatabaseConfiguration.Maxmind::new
+            ),
+            new NamedWriteableRegistry.Entry(
+                DatabaseConfiguration.Provider.class,
+                DatabaseConfiguration.Local.NAME,
+                DatabaseConfiguration.Local::new
+            ),
+            new NamedWriteableRegistry.Entry(
+                DatabaseConfiguration.Provider.class,
+                DatabaseConfiguration.Web.NAME,
+                DatabaseConfiguration.Web::new
+            )
         );
     }
 

+ 153 - 11
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java

@@ -9,13 +9,16 @@
 
 package org.elasticsearch.ingest.geoip.direct;
 
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.NamedWriteable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -34,19 +37,19 @@ import java.util.regex.Pattern;
  * That is, it has an id e.g. "my_db_config_1" and it says "download the file named XXXX from SomeCompany, and here's the
  * magic token to use to do that."
  */
-public record DatabaseConfiguration(String id, String name, Maxmind maxmind) implements Writeable, ToXContentObject {
+public record DatabaseConfiguration(String id, String name, Provider provider) implements Writeable, ToXContentObject {
 
     // id is a user selected signifier like 'my_domain_db'
     // name is the name of a file that can be downloaded (like 'GeoIP2-Domain')
 
-    // a configuration will have a 'type' like "maxmind", and that might have some more details,
+    // a configuration will have a 'provider' like "maxmind", and that might have some more details,
     // for now, though the important thing is that the json has to have it even though we don't model it meaningfully in this class
 
     public DatabaseConfiguration {
         // these are invariants, not actual validation
         Objects.requireNonNull(id);
         Objects.requireNonNull(name);
-        Objects.requireNonNull(maxmind);
+        Objects.requireNonNull(provider);
     }
 
     /**
@@ -76,25 +79,49 @@ public record DatabaseConfiguration(String id, String name, Maxmind maxmind) imp
     );
 
     private static final ParseField NAME = new ParseField("name");
-    private static final ParseField MAXMIND = new ParseField("maxmind");
+    private static final ParseField MAXMIND = new ParseField(Maxmind.NAME);
+    private static final ParseField WEB = new ParseField(Web.NAME);
+    private static final ParseField LOCAL = new ParseField(Local.NAME);
 
     private static final ConstructingObjectParser<DatabaseConfiguration, String> PARSER = new ConstructingObjectParser<>(
         "database",
         false,
         (a, id) -> {
             String name = (String) a[0];
-            Maxmind maxmind = (Maxmind) a[1];
-            return new DatabaseConfiguration(id, name, maxmind);
+            Provider provider;
+            if (a[1] != null) {
+                provider = (Maxmind) a[1];
+            } else if (a[2] != null) {
+                provider = (Web) a[2];
+            } else {
+                provider = (Local) a[3];
+            }
+            return new DatabaseConfiguration(id, name, provider);
         }
     );
 
     static {
         PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME);
-        PARSER.declareObject(ConstructingObjectParser.constructorArg(), (parser, id) -> Maxmind.PARSER.apply(parser, null), MAXMIND);
+        PARSER.declareObject(
+            ConstructingObjectParser.optionalConstructorArg(),
+            (parser, id) -> Maxmind.PARSER.apply(parser, null),
+            MAXMIND
+        );
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (parser, id) -> Web.PARSER.apply(parser, null), WEB);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (parser, id) -> Local.PARSER.apply(parser, null), LOCAL);
     }
 
     public DatabaseConfiguration(StreamInput in) throws IOException {
-        this(in.readString(), in.readString(), new Maxmind(in));
+        this(in.readString(), in.readString(), readProvider(in));
+    }
+
+    private static Provider readProvider(StreamInput in) throws IOException {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) {
+            return in.readNamedWriteable(Provider.class);
+        } else {
+            // prior to the above version, every provider was a Maxmind provider, so reading a Maxmind here is the only possibility
+            return new Maxmind(in.readString());
+        }
     }
 
     public static DatabaseConfiguration parse(XContentParser parser, String id) {
@@ -105,14 +132,27 @@ public record DatabaseConfiguration(String id, String name, Maxmind maxmind) imp
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(id);
         out.writeString(name);
-        maxmind.writeTo(out);
+        if (out.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) {
+            out.writeNamedWriteable(provider);
+        } else {
+            if (provider instanceof Maxmind maxmind) {
+                out.writeString(maxmind.accountId);
+            } else {
+                /*
+                 * The existence of non-Maxmind providers is gated on the feature get_database_configuration_action.multi_node, and
+                 * get_database_configuration_action.multi_node is only available on or after
+                 * TransportVersions.INGEST_GEO_DATABASE_PROVIDERS.
+                 */
+                assert false : "non-maxmind DatabaseConfiguration.Provider [" + provider.getWriteableName() + "]";
+            }
+        }
     }
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field("name", name);
-        builder.field("maxmind", maxmind);
+        builder.field(provider.getWriteableName(), provider);
         builder.endObject();
         return builder;
     }
@@ -168,7 +208,24 @@ public record DatabaseConfiguration(String id, String name, Maxmind maxmind) imp
         return err.validationErrors().isEmpty() ? null : err;
     }
 
-    public record Maxmind(String accountId) implements Writeable, ToXContentObject {
+    public boolean isReadOnly() {
+        return provider.isReadOnly();
+    }
+
+    /**
+     * The common interface that all database providers must implement.
+     */
+    public interface Provider extends NamedWriteable, ToXContentObject {
+        boolean isReadOnly();
+    }
+
+    public record Maxmind(String accountId) implements Provider {
+        public static final String NAME = "maxmind";
+
+        @Override
+        public String getWriteableName() {
+            return NAME;
+        }
 
         public Maxmind {
             // this is an invariant, not actual validation
@@ -206,5 +263,90 @@ public record DatabaseConfiguration(String id, String name, Maxmind maxmind) imp
             builder.endObject();
             return builder;
         }
+
+        @Override
+        public boolean isReadOnly() {
+            return false;
+        }
+    }
+
+    public record Local(String type) implements Provider {
+        public static final String NAME = "local";
+
+        private static final ParseField TYPE = new ParseField("type");
+
+        private static final ConstructingObjectParser<Local, Void> PARSER = new ConstructingObjectParser<>("database", false, (a, id) -> {
+            String type = (String) a[0];
+            return new Local(type);
+        });
+
+        static {
+            PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE);
+        }
+
+        public Local(StreamInput in) throws IOException {
+            this(in.readString());
+        }
+
+        public static Local parse(XContentParser parser) {
+            return PARSER.apply(parser, null);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(type);
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.field("type", type);
+            builder.endObject();
+            return builder;
+        }
+
+        @Override
+        public String getWriteableName() {
+            return NAME;
+        }
+
+        @Override
+        public boolean isReadOnly() {
+            return true;
+        }
+    }
+
+    public record Web() implements Provider {
+        public static final String NAME = "web";
+
+        private static final ObjectParser<Web, Void> PARSER = new ObjectParser<>("database", Web::new);
+
+        public Web(StreamInput in) throws IOException {
+            this();
+        }
+
+        public static Web parse(XContentParser parser) {
+            return PARSER.apply(parser, null);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {}
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.endObject();
+            return builder;
+        }
+
+        @Override
+        public String getWriteableName() {
+            return NAME;
+        }
+
+        @Override
+        public boolean isReadOnly() {
+            return true;
+        }
     }
 }

+ 6 - 1
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java

@@ -49,7 +49,12 @@ public class PutDatabaseConfigurationAction extends ActionType<AcknowledgedRespo
         }
 
         public static Request parseRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String id, XContentParser parser) {
-            return new Request(masterNodeTimeout, ackTimeout, DatabaseConfiguration.parse(parser, id));
+            DatabaseConfiguration database = DatabaseConfiguration.parse(parser, id);
+            if (database.isReadOnly()) {
+                throw new IllegalArgumentException("Database " + id + " is read only");
+            } else {
+                return new Request(masterNodeTimeout, ackTimeout, database);
+            }
         }
 
         @Override

+ 2 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java

@@ -91,6 +91,8 @@ public class TransportDeleteDatabaseConfigurationAction extends TransportMasterN
         final IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
         if (geoIpMeta.getDatabases().containsKey(id) == false) {
             throw new ResourceNotFoundException("Database configuration not found: {}", id);
+        } else if (geoIpMeta.getDatabases().get(id).database().isReadOnly()) {
+            throw new IllegalArgumentException("Database " + id + " is read only");
         }
         deleteDatabaseConfigurationTaskQueue.submitTask(
             Strings.format("delete-geoip-database-configuration-[%s]", id),

+ 170 - 23
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java

@@ -9,7 +9,6 @@
 
 package org.elasticsearch.ingest.geoip.direct;
 
-import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.support.ActionFilters;
@@ -19,19 +18,28 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.features.FeatureService;
+import org.elasticsearch.ingest.geoip.DatabaseNodeService;
+import org.elasticsearch.ingest.geoip.GeoIpTaskState;
 import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
 import org.elasticsearch.injection.guice.Inject;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.Comparator;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import static org.elasticsearch.ingest.IngestGeoIpFeatures.GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE;
 
@@ -43,6 +51,7 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
     List<DatabaseConfigurationMetadata>> {
 
     private final FeatureService featureService;
+    private final DatabaseNodeService databaseNodeService;
 
     @Inject
     public TransportGetDatabaseConfigurationAction(
@@ -50,7 +59,8 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
         ClusterService clusterService,
         ThreadPool threadPool,
         ActionFilters actionFilters,
-        FeatureService featureService
+        FeatureService featureService,
+        DatabaseNodeService databaseNodeService
     ) {
         super(
             GetDatabaseConfigurationAction.NAME,
@@ -61,6 +71,7 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
             threadPool.executor(ThreadPool.Names.MANAGEMENT)
         );
         this.featureService = featureService;
+        this.databaseNodeService = databaseNodeService;
     }
 
     @Override
@@ -74,9 +85,19 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
              * TransportGetDatabaseConfigurationAction used to be a TransportMasterNodeAction, and not all nodes in the cluster have been
              * updated. So we don't want to send node requests to the other nodes because they will blow up. Instead, we just return
              * the information that we used to return from the master node (it doesn't make any difference that this might not be the master
-             * node, because we're only reading the cluster state).
+             * node, because we're only reading the cluster state). Because older nodes only know about the Maxmind provider type, we filter
+             * out all others here to avoid causing problems on those nodes.
              */
-            newResponseAsync(task, request, createActionContext(task, request), List.of(), List.of(), listener);
+            newResponseAsync(
+                task,
+                request,
+                createActionContext(task, request).stream()
+                    .filter(database -> database.database().provider() instanceof DatabaseConfiguration.Maxmind)
+                    .toList(),
+                List.of(),
+                List.of(),
+                listener
+            );
         } else {
             super.doExecute(task, request, listener);
         }
@@ -97,28 +118,79 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
             );
         }
 
-        final IngestGeoIpMetadata geoIpMeta = clusterService.state().metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
         List<DatabaseConfigurationMetadata> results = new ArrayList<>();
-
+        PersistentTasksCustomMetadata tasksMetadata = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata(
+            clusterService.state()
+        );
         for (String id : ids) {
-            if (Regex.isSimpleMatchPattern(id)) {
-                for (Map.Entry<String, DatabaseConfigurationMetadata> entry : geoIpMeta.getDatabases().entrySet()) {
-                    if (Regex.simpleMatch(id, entry.getKey())) {
-                        results.add(entry.getValue());
+            results.addAll(getWebDatabases(tasksMetadata, id));
+            results.addAll(getMaxmindDatabases(clusterService, id));
+        }
+        return results;
+    }
+
+    /*
+     * This returns read-only database information about the databases managed by the standard downloader
+     */
+    private static Collection<DatabaseConfigurationMetadata> getWebDatabases(PersistentTasksCustomMetadata tasksMetadata, String id) {
+        List<DatabaseConfigurationMetadata> webDatabases = new ArrayList<>();
+        if (tasksMetadata != null) {
+            PersistentTasksCustomMetadata.PersistentTask<?> maybeGeoIpTask = tasksMetadata.getTask("geoip-downloader");
+            if (maybeGeoIpTask != null) {
+                GeoIpTaskState geoIpTaskState = (GeoIpTaskState) maybeGeoIpTask.getState();
+                if (geoIpTaskState != null) {
+                    Map<String, GeoIpTaskState.Metadata> databases = geoIpTaskState.getDatabases();
+                    for (String databaseFileName : databases.keySet()) {
+                        String databaseName = getDatabaseNameForFileName(databaseFileName);
+                        String databaseId = getDatabaseIdForFileName(DatabaseConfiguration.Web.NAME, databaseFileName);
+                        if ((Regex.isSimpleMatchPattern(id) && Regex.simpleMatch(id, databaseId)) || id.equals(databaseId)) {
+                            webDatabases.add(
+                                new DatabaseConfigurationMetadata(
+                                    new DatabaseConfiguration(databaseId, databaseName, new DatabaseConfiguration.Web()),
+                                    -1,
+                                    databases.get(databaseFileName).lastUpdate()
+                                )
+                            );
+                        }
                     }
                 }
-            } else {
-                DatabaseConfigurationMetadata meta = geoIpMeta.getDatabases().get(id);
-                if (meta == null) {
-                    throw new ResourceNotFoundException("database configuration not found: {}", id);
-                } else {
-                    results.add(meta);
+            }
+        }
+        return webDatabases;
+    }
+
+    private static String getDatabaseIdForFileName(String providerType, String databaseFileName) {
+        return "_" + providerType + "_" + Base64.getEncoder().encodeToString(databaseFileName.getBytes(StandardCharsets.UTF_8));
+    }
+
+    private static String getDatabaseNameForFileName(String databaseFileName) {
+        return databaseFileName.endsWith(".mmdb")
+            ? databaseFileName.substring(0, databaseFileName.length() - ".mmdb".length())
+            : databaseFileName;
+    }
+
+    /*
+     * This returns information about databases that are downloaded from maxmind.
+     */
+    private static Collection<DatabaseConfigurationMetadata> getMaxmindDatabases(ClusterService clusterService, String id) {
+        List<DatabaseConfigurationMetadata> maxmindDatabases = new ArrayList<>();
+        final IngestGeoIpMetadata geoIpMeta = clusterService.state().metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
+        if (Regex.isSimpleMatchPattern(id)) {
+            for (Map.Entry<String, DatabaseConfigurationMetadata> entry : geoIpMeta.getDatabases().entrySet()) {
+                if (Regex.simpleMatch(id, entry.getKey())) {
+                    maxmindDatabases.add(entry.getValue());
                 }
             }
+        } else {
+            DatabaseConfigurationMetadata meta = geoIpMeta.getDatabases().get(id);
+            if (meta != null) {
+                maxmindDatabases.add(meta);
+            }
         }
-        return results;
+        return maxmindDatabases;
     }
 
+    @Override
     protected void newResponseAsync(
         Task task,
         GetDatabaseConfigurationAction.Request request,
@@ -127,13 +199,47 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
         List<FailedNodeException> failures,
         ActionListener<GetDatabaseConfigurationAction.Response> listener
     ) {
-        ActionListener.run(
-            listener,
-            l -> ActionListener.respondAndRelease(
+        ActionListener.run(listener, l -> {
+            List<DatabaseConfigurationMetadata> combinedResults = new ArrayList<>(results);
+            combinedResults.addAll(
+                deduplicateNodeResponses(responses, results.stream().map(result -> result.database().name()).collect(Collectors.toSet()))
+            );
+            ActionListener.respondAndRelease(
                 l,
-                new GetDatabaseConfigurationAction.Response(results, clusterService.getClusterName(), responses, failures)
+                new GetDatabaseConfigurationAction.Response(combinedResults, clusterService.getClusterName(), responses, failures)
+            );
+        });
+    }
+
+    /*
+     * This deduplicates the nodeResponses by name, favoring the most recent. This is because each node is reporting the local databases
+     * that it has, and we don't want to report duplicates to the user. It also filters out any that already exist in the set of
+     * preExistingNames. This is because the non-local databases take precedence, so any local database with the same name as a non-local
+     * one will not be used.
+     * Non-private for unit testing
+     */
+    static Collection<DatabaseConfigurationMetadata> deduplicateNodeResponses(
+        List<GetDatabaseConfigurationAction.NodeResponse> nodeResponses,
+        Set<String> preExistingNames
+    ) {
+        /*
+         * Each node reports the list of databases that are in its config/ingest-geoip directory. For the sake of this API we assume all
+         * local databases with the same name are the same database, and deduplicate by name and just return the newest.
+         */
+        return nodeResponses.stream()
+            .flatMap(response -> response.getDatabases().stream())
+            .collect(
+                Collectors.groupingBy(
+                    database -> database.database().name(),
+                    Collectors.maxBy(Comparator.comparing(DatabaseConfigurationMetadata::modifiedDate))
+                )
             )
-        );
+            .values()
+            .stream()
+            .filter(Optional::isPresent)
+            .map(Optional::get)
+            .filter(database -> preExistingNames.contains(database.database().name()) == false)
+            .toList();
     }
 
     @Override
@@ -157,7 +263,48 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
 
     @Override
     protected GetDatabaseConfigurationAction.NodeResponse nodeOperation(GetDatabaseConfigurationAction.NodeRequest request, Task task) {
-        return new GetDatabaseConfigurationAction.NodeResponse(transportService.getLocalNode(), List.of());
+        final Set<String> ids;
+        if (request.getDatabaseIds().length == 0) {
+            // if we did not ask for a specific name, then return all databases
+            ids = Set.of("*");
+        } else {
+            ids = new LinkedHashSet<>(Arrays.asList(request.getDatabaseIds()));
+        }
+        if (ids.size() > 1 && ids.stream().anyMatch(Regex::isSimpleMatchPattern)) {
+            throw new IllegalArgumentException(
+                "wildcard only supports a single value, please use comma-separated values or a single wildcard value"
+            );
+        }
+
+        List<DatabaseConfigurationMetadata> results = new ArrayList<>();
+        for (String id : ids) {
+            results.addAll(getLocalDatabases(databaseNodeService, id));
+        }
+        return new GetDatabaseConfigurationAction.NodeResponse(transportService.getLocalNode(), results);
     }
 
+    /*
+     * This returns information about the databases that users have put in the config/ingest-geoip directory on the node.
+     */
+    private static List<DatabaseConfigurationMetadata> getLocalDatabases(DatabaseNodeService databaseNodeService, String id) {
+        List<DatabaseConfigurationMetadata> localDatabases = new ArrayList<>();
+        Map<String, DatabaseNodeService.ConfigDatabaseDetail> configDatabases = databaseNodeService.getConfigDatabasesDetail();
+        for (DatabaseNodeService.ConfigDatabaseDetail configDatabase : configDatabases.values()) {
+            String databaseId = getDatabaseIdForFileName(DatabaseConfiguration.Local.NAME, configDatabase.name());
+            if ((Regex.isSimpleMatchPattern(id) && Regex.simpleMatch(id, databaseId)) || id.equals(databaseId)) {
+                localDatabases.add(
+                    new DatabaseConfigurationMetadata(
+                        new DatabaseConfiguration(
+                            databaseId,
+                            getDatabaseNameForFileName(configDatabase.name()),
+                            new DatabaseConfiguration.Local(configDatabase.type())
+                        ),
+                        -1,
+                        configDatabase.buildDateInMillis() == null ? -1 : configDatabase.buildDateInMillis()
+                    )
+                );
+            }
+        }
+        return localDatabases;
+    }
 }

+ 7 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java

@@ -9,6 +9,7 @@
 
 package org.elasticsearch.ingest.geoip;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;
 import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata;
@@ -21,6 +22,12 @@ import java.util.HashMap;
 import java.util.Map;
 
 public class IngestGeoIpMetadataTests extends AbstractChunkedSerializingTestCase<IngestGeoIpMetadata> {
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(new IngestGeoIpPlugin().getNamedWriteables());
+    }
+
     @Override
     protected IngestGeoIpMetadata doParseInstance(XContentParser parser) throws IOException {
         return IngestGeoIpMetadata.fromXContent(parser);

+ 7 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java

@@ -9,7 +9,9 @@
 
 package org.elasticsearch.ingest.geoip.direct;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin;
 import org.elasticsearch.test.AbstractXContentSerializingTestCase;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.XContentParser;
@@ -21,6 +23,11 @@ import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationTests.r
 
 public class DatabaseConfigurationMetadataTests extends AbstractXContentSerializingTestCase<DatabaseConfigurationMetadata> {
 
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(new IngestGeoIpPlugin().getNamedWriteables());
+    }
+
     private String id;
 
     @Override

+ 30 - 8
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java

@@ -9,8 +9,12 @@
 
 package org.elasticsearch.ingest.geoip.direct;
 
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.ingest.geoip.IngestGeoIpPlugin;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.Local;
 import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.Maxmind;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.Web;
 import org.elasticsearch.test.AbstractXContentSerializingTestCase;
 import org.elasticsearch.xcontent.XContentParser;
 
@@ -21,6 +25,11 @@ import static org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.MAXMIN
 
 public class DatabaseConfigurationTests extends AbstractXContentSerializingTestCase<DatabaseConfiguration> {
 
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(new IngestGeoIpPlugin().getNamedWriteables());
+    }
+
     private String id;
 
     @Override
@@ -35,26 +44,39 @@ public class DatabaseConfigurationTests extends AbstractXContentSerializingTestC
     }
 
     public static DatabaseConfiguration randomDatabaseConfiguration(String id) {
-        return new DatabaseConfiguration(id, randomFrom(MAXMIND_NAMES), new Maxmind(randomAlphaOfLength(5)));
+        DatabaseConfiguration.Provider provider = switch (between(0, 2)) {
+            case 0 -> new Maxmind(randomAlphaOfLength(5));
+            case 1 -> new Web();
+            case 2 -> new Local(randomAlphaOfLength(10));
+            default -> throw new AssertionError("failure, got illegal switch case");
+        };
+        return new DatabaseConfiguration(id, randomFrom(MAXMIND_NAMES), provider);
     }
 
     @Override
     protected DatabaseConfiguration mutateInstance(DatabaseConfiguration instance) {
         switch (between(0, 2)) {
             case 0:
-                return new DatabaseConfiguration(instance.id() + randomAlphaOfLength(2), instance.name(), instance.maxmind());
+                return new DatabaseConfiguration(instance.id() + randomAlphaOfLength(2), instance.name(), instance.provider());
             case 1:
                 return new DatabaseConfiguration(
                     instance.id(),
                     randomValueOtherThan(instance.name(), () -> randomFrom(MAXMIND_NAMES)),
-                    instance.maxmind()
+                    instance.provider()
                 );
             case 2:
-                return new DatabaseConfiguration(
-                    instance.id(),
-                    instance.name(),
-                    new Maxmind(instance.maxmind().accountId() + randomAlphaOfLength(2))
-                );
+                DatabaseConfiguration.Provider provider = instance.provider();
+                DatabaseConfiguration.Provider modifiedProvider;
+                if (provider instanceof Maxmind maxmind) {
+                    modifiedProvider = new Maxmind(maxmind.accountId() + randomAlphaOfLength(2));
+                } else if (provider instanceof Web) {
+                    modifiedProvider = new Maxmind(randomAlphaOfLength(20)); // Web has no fields to mutate, so switch provider type
+                } else if (provider instanceof Local local) {
+                    modifiedProvider = new Local(local.type() + randomAlphaOfLength(2));
+                } else {
+                    throw new AssertionError("Unexpected provider type: " + provider.getClass());
+                }
+                return new DatabaseConfiguration(instance.id(), instance.name(), modifiedProvider);
             default:
                 throw new AssertionError("failure, got illegal switch case");
         }

+ 131 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationActionTests.java

@@ -0,0 +1,131 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.ingest.geoip.direct.GetDatabaseConfigurationAction.NodeResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.Mockito.mock;
+
+public class TransportGetDatabaseConfigurationActionTests extends ESTestCase {
+    public void testDeduplicateNodeResponses() {
+        {
+            List<NodeResponse> nodeResponses = new ArrayList<>();
+            Set<String> preExistingNames = Set.of();
+            Collection<DatabaseConfigurationMetadata> deduplicated = TransportGetDatabaseConfigurationAction.deduplicateNodeResponses(
+                nodeResponses,
+                preExistingNames
+            );
+            assertTrue(deduplicated.isEmpty());
+        }
+        {
+            List<NodeResponse> nodeResponses = List.of(
+                generateTestNodeResponse(List.of()),
+                generateTestNodeResponse(List.of()),
+                generateTestNodeResponse(List.of())
+            );
+            Set<String> preExistingNames = Set.of();
+            Collection<DatabaseConfigurationMetadata> deduplicated = TransportGetDatabaseConfigurationAction.deduplicateNodeResponses(
+                nodeResponses,
+                preExistingNames
+            );
+            assertTrue(deduplicated.isEmpty());
+        }
+        {
+            // 3 nodes with 3 overlapping responses. We expect the deduplicated collection to include 1, 2, 3, and 4.
+            List<NodeResponse> nodeResponses = List.of(
+                generateTestNodeResponse(List.of("1", "2", "3")),
+                generateTestNodeResponse(List.of("1", "2", "3")),
+                generateTestNodeResponse(List.of("1", "4"))
+            );
+            Set<String> preExistingNames = Set.of();
+            Collection<DatabaseConfigurationMetadata> deduplicated = TransportGetDatabaseConfigurationAction.deduplicateNodeResponses(
+                nodeResponses,
+                preExistingNames
+            );
+            assertThat(deduplicated.size(), equalTo(4));
+            assertThat(
+                deduplicated.stream().map(database -> database.database().name()).collect(Collectors.toSet()),
+                equalTo(Set.of("1", "2", "3", "4"))
+            );
+        }
+        {
+            /*
+             * 3 nodes with 3 overlapping responses, but this time we're also passing in a set of pre-existing names that overlap with
+             * two of them. So we expect the deduplicated collection to include 1 and 4.
+             */
+            List<NodeResponse> nodeResponses = List.of(
+                generateTestNodeResponse(List.of("1", "2", "3")),
+                generateTestNodeResponse(List.of("1", "2", "3")),
+                generateTestNodeResponse(List.of("1", "4"))
+            );
+            Set<String> preExistingNames = Set.of("2", "3", "5");
+            Collection<DatabaseConfigurationMetadata> deduplicated = TransportGetDatabaseConfigurationAction.deduplicateNodeResponses(
+                nodeResponses,
+                preExistingNames
+            );
+            assertThat(deduplicated.size(), equalTo(2));
+            assertThat(
+                deduplicated.stream().map(database -> database.database().name()).collect(Collectors.toSet()),
+                equalTo(Set.of("1", "4"))
+            );
+        }
+        {
+            /*
+             * Here 3 nodes report the same database, but with different modified dates and versions. We expect the one with the highest
+             * modified date to win out.
+             */
+            List<NodeResponse> nodeResponses = List.of(
+                generateTestNodeResponseFromDatabases(List.of(generateTestDatabase("1", 1))),
+                generateTestNodeResponseFromDatabases(List.of(generateTestDatabase("1", 1000))),
+                generateTestNodeResponseFromDatabases(List.of(generateTestDatabase("1", 3)))
+            );
+            Set<String> preExistingNames = Set.of("2", "3", "5");
+            Collection<DatabaseConfigurationMetadata> deduplicated = TransportGetDatabaseConfigurationAction.deduplicateNodeResponses(
+                nodeResponses,
+                preExistingNames
+            );
+            assertThat(deduplicated.size(), equalTo(1));
+            DatabaseConfigurationMetadata result = deduplicated.iterator().next();
+            assertThat(result, equalTo(nodeResponses.get(1).getDatabases().get(0)));
+        }
+    }
+
+    private NodeResponse generateTestNodeResponse(List<String> databaseNames) {
+        List<DatabaseConfigurationMetadata> databases = databaseNames.stream().map(this::generateTestDatabase).toList();
+        return generateTestNodeResponseFromDatabases(databases);
+    }
+
+    private NodeResponse generateTestNodeResponseFromDatabases(List<DatabaseConfigurationMetadata> databases) {
+        DiscoveryNode discoveryNode = mock(DiscoveryNode.class);
+        return new NodeResponse(discoveryNode, databases);
+    }
+
+    private DatabaseConfigurationMetadata generateTestDatabase(String databaseName) {
+        return generateTestDatabase(databaseName, randomLongBetween(0, Long.MAX_VALUE));
+    }
+
+    private DatabaseConfigurationMetadata generateTestDatabase(String databaseName, long modifiedDate) {
+        DatabaseConfiguration databaseConfiguration = new DatabaseConfiguration(
+            randomAlphaOfLength(50),
+            databaseName,
+            new DatabaseConfiguration.Local(randomAlphaOfLength(20))
+        );
+        return new DatabaseConfigurationMetadata(databaseConfiguration, randomLongBetween(0, Long.MAX_VALUE), modifiedDate);
+    }
+}

+ 32 - 3
modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml

@@ -1,7 +1,7 @@
 setup:
   - requires:
-      cluster_features: ["geoip.downloader.database.configuration"]
-      reason: "geoip downloader database configuration APIs added in 8.15"
+      cluster_features: ["geoip.downloader.database.configuration", "get_database_configuration_action.multi_node"]
+      reason: "geoip downloader database configuration APIs added in 8.15, and updated in 8.16 to return more results"
 
 ---
 "Test adding, getting, and removing geoip databases":
@@ -41,6 +41,17 @@ setup:
           }
   - match: { acknowledged: true }
 
+  - do:
+      catch: /illegal_argument_exception/
+      ingest.put_geoip_database:
+        id: "_web_TXlDdXN0b21HZW9MaXRlMi1DaXR5Lm1tZGI="
+        body:  >
+          {
+            "name": "GeoIP2-City",
+            "web": {
+            }
+          }
+
   - do:
       ingest.get_geoip_database:
         id: "my_database_1"
@@ -52,19 +63,37 @@ setup:
 
   - do:
       ingest.get_geoip_database: {}
-  - length: { databases: 2 }
+  - length: { databases: 6 }
 
   - do:
       ingest.get_geoip_database:
         id: "my_database_1,my_database_2"
   - length: { databases: 2 }
 
+  - do:
+      ingest.get_geoip_database:
+        id: "_web_TXlDdXN0b21HZW9MaXRlMi1DaXR5Lm1tZGI="
+  - length: { databases: 1 }
+  - match: { databases.0.id: "_web_TXlDdXN0b21HZW9MaXRlMi1DaXR5Lm1tZGI=" }
+  - gte: { databases.0.modified_date_millis: -1 }
+  - match: { databases.0.database.name: "MyCustomGeoLite2-City" }
+
   - do:
       ingest.delete_geoip_database:
         id: "my_database_1"
 
+  - do:
+      catch: /resource_not_found_exception/
+      ingest.delete_geoip_database:
+        id: "_web_TXlDdXN0b21HZW9MaXRlMi1DaXR5Lm1tZGI="
+
   - do:
       ingest.get_geoip_database: {}
+  - length: { databases: 5 }
+
+  - do:
+      ingest.get_geoip_database:
+        id: "my_database_2"
   - length: { databases: 1 }
   - match: { databases.0.id: "my_database_2" }
   - gte: { databases.0.modified_date_millis: 0 }

+ 1 - 0
server/src/main/java/org/elasticsearch/TransportVersions.java

@@ -233,6 +233,7 @@ public class TransportVersions {
     public static final TransportVersion REGEX_AND_RANGE_INTERVAL_QUERIES = def(8_757_00_0);
     public static final TransportVersion RRF_QUERY_REWRITE = def(8_758_00_0);
     public static final TransportVersion SEARCH_FAILURE_STATS = def(8_759_00_0);
+    public static final TransportVersion INGEST_GEO_DATABASE_PROVIDERS = def(8_760_00_0);
 
     /*
      * STOP! READ THIS FIRST! No, really,