
Directly download commercial ip geolocation databases from providers (#110844)

Co-authored-by: Keith Massey <keith.massey@elastic.co>
Joe Gallo, 1 year ago
parent commit 27e7601698
52 files changed, with 4429 additions and 54 deletions
  1. docs/changelog/110844.yaml (+5 -0)
  2. docs/reference/security/authorization/privileges.asciidoc (+1 -1)
  3. modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java (+194 -0)
  4. modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java (+15 -15)
  5. modules/ingest-geoip/src/main/java/module-info.java (+1 -0)
  6. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java (+51 -18)
  7. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java (+474 -0)
  8. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java (+257 -0)
  9. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java (+153 -0)
  10. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java (+6 -4)
  11. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java (+1 -1)
  12. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java (+34 -5)
  13. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java (+26 -0)
  14. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java (+157 -0)
  15. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java (+68 -7)
  16. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java (+209 -0)
  17. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadata.java (+84 -0)
  18. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DeleteDatabaseConfigurationAction.java (+70 -0)
  19. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java (+142 -0)
  20. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java (+87 -0)
  21. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestDeleteDatabaseConfigurationAction.java (+46 -0)
  22. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestGetDatabaseConfigurationAction.java (+47 -0)
  23. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestPutDatabaseConfigurationAction.java (+52 -0)
  24. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java (+128 -0)
  25. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java (+109 -0)
  26. modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java (+178 -0)
  27. modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java (+538 -0)
  28. modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskStateSerializationTests.java (+72 -0)
  29. modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java (+49 -0)
  30. modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java (+91 -0)
  31. modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java (+74 -0)
  32. modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java (+86 -0)
  33. modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java (+69 -0)
  34. modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java (+5 -0)
  35. modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml (+72 -0)
  36. rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json (+31 -0)
  37. rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_geoip_database.json (+37 -0)
  38. rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json (+35 -0)
  39. server/src/main/java/module-info.java (+2 -0)
  40. server/src/main/java/org/elasticsearch/TransportVersions.java (+1 -0)
  41. server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java (+86 -0)
  42. server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java (+22 -0)
  43. server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification (+1 -0)
  44. server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java (+3 -3)
  45. test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java (+125 -0)
  46. test/fixtures/geoip-fixture/src/main/resources/geoip-fixture/GeoIP2-City.tgz (BIN)
  47. x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java (+1 -0)
  48. x-pack/plugin/geoip-enterprise-downloader/build.gradle (+19 -0)
  49. x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseDownloaderPlugin.java (+48 -0)
  50. x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java (+145 -0)
  51. x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java (+219 -0)
  52. x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java (+3 -0)
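
The three rest-api-spec files above (ingest.put_geoip_database, ingest.get_geoip_database, ingest.delete_geoip_database) define the new CRUD endpoints for database configurations. As a minimal sketch of how the PUT endpoint might be called from the Java low-level REST client -- the URL path is taken from the spec name and the body fields mirror the DatabaseConfiguration used in the tests below, so treat the exact shapes as assumptions rather than documented API:

    // Hypothetical usage sketch; the endpoint path and body fields are inferred from this
    // commit's rest-api-spec definitions and DatabaseConfiguration tests, not from docs.
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class PutGeoipDatabaseExample {
        static Response putDatabaseConfiguration(RestClient restClient) throws Exception {
            Request put = new Request("PUT", "/_ingest/geoip/database/my-database-1");
            put.setJsonEntity("""
                {
                  "name": "GeoIP2-City",
                  "maxmind": { "account_id": "1234" }
                }
                """);
            return restClient.performRequest(put);
        }
    }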

+ 5 - 0
docs/changelog/110844.yaml

@@ -0,0 +1,5 @@
+pr: 110844
+summary: Directly download commercial ip geolocation databases from providers
+area: Ingest Node
+type: feature
+issues: []

+ 1 - 1
docs/reference/security/authorization/privileges.asciidoc

@@ -282,7 +282,7 @@ status of {Ilm}
 This privilege is not available in {serverless-full}.
 
 `read_pipeline`::
-Read-only access to ingest pipline (get, simulate).
+Read-only access to ingest pipeline (get, simulate).
 
 `read_slm`::
 All read-only {slm-init} actions, such as getting policies and checking the

+ 194 - 0
modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java

@@ -0,0 +1,194 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import fixture.geoip.EnterpriseGeoIpHttpFixture;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.ResourceAlreadyExistsException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.MockSecureSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.core.Booleans;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.ingest.EnterpriseGeoIpTask;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;
+import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction;
+import org.elasticsearch.persistent.PersistentTasksService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.reindex.ReindexPlugin;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xcontent.json.JsonXContent;
+import org.junit.ClassRule;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER;
+import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloaderTaskExecutor.MAXMIND_LICENSE_KEY_SETTING;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+public class EnterpriseGeoIpDownloaderIT extends ESIntegTestCase {
+
+    private static final String DATABASE_TYPE = "GeoIP2-City";
+    private static final boolean useFixture = Booleans.parseBoolean(System.getProperty("geoip_use_service", "false")) == false;
+
+    @ClassRule
+    public static final EnterpriseGeoIpHttpFixture fixture = new EnterpriseGeoIpHttpFixture(useFixture, DATABASE_TYPE);
+
+    protected String getEndpoint() {
+        return useFixture ? fixture.getAddress() : null;
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString(MAXMIND_LICENSE_KEY_SETTING.getKey(), "license_key");
+        Settings.Builder builder = Settings.builder();
+        builder.setSecureSettings(secureSettings)
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true);
+        if (getEndpoint() != null) {
+            // note: this is using the enterprise fixture for the regular downloader, too, as
+            // a slightly hacky way of making the regular downloader not actually download any files
+            builder.put(GeoIpDownloader.ENDPOINT_SETTING.getKey(), getEndpoint());
+        }
+        return builder.build();
+    }
+
+    @SuppressWarnings("unchecked")
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        // the reindex plugin is (somewhat surprisingly) necessary in order to be able to delete-by-query,
+        // which modules/ingest-geoip does to delete old chunks
+        return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), IngestGeoIpPlugin.class, ReindexPlugin.class);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testEnterpriseDownloaderTask() throws Exception {
+        /*
+         * This test starts the enterprise geoip downloader task, and creates a database configuration. Then it creates an ingest
+         * pipeline that references that database, and ingests a single document using that pipeline. It then asserts that the document
+         * was updated with information from the database.
+         * Note that the "enterprise database" is actually just a GeoLite2 database being served by the EnterpriseGeoIpHttpFixture.
+         */
+        if (getEndpoint() != null) {
+            EnterpriseGeoIpDownloader.DEFAULT_MAXMIND_ENDPOINT = getEndpoint();
+        }
+        final String pipelineName = "enterprise_geoip_pipeline";
+        final String indexName = "enterprise_geoip_test_index";
+        final String sourceField = "ip";
+        final String targetField = "ip-city";
+
+        startEnterpriseGeoIpDownloaderTask();
+        configureDatabase(DATABASE_TYPE);
+        createGeoIpPipeline(pipelineName, DATABASE_TYPE, sourceField, targetField);
+        String documentId = ingestDocument(indexName, pipelineName, sourceField);
+        GetResponse getResponse = client().get(new GetRequest(indexName, documentId)).actionGet();
+        Map<String, Object> returnedSource = getResponse.getSource();
+        assertNotNull(returnedSource);
+        Object targetFieldValue = returnedSource.get(targetField);
+        assertNotNull(targetFieldValue);
+        assertThat(((Map<String, Object>) targetFieldValue).get("organization_name"), equalTo("Bredband2 AB"));
+    }
+
+    private void startEnterpriseGeoIpDownloaderTask() {
+        PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
+        persistentTasksService.sendStartRequest(
+            ENTERPRISE_GEOIP_DOWNLOADER,
+            ENTERPRISE_GEOIP_DOWNLOADER,
+            new EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams(),
+            TimeValue.MAX_VALUE,
+            ActionListener.wrap(r -> logger.debug("Started enterprise geoip downloader task"), e -> {
+                Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e;
+                if (t instanceof ResourceAlreadyExistsException == false) {
+                    logger.error("failed to create enterprise geoip downloader task", e);
+                }
+            })
+        );
+    }
+
+    private void configureDatabase(String databaseType) throws Exception {
+        admin().cluster()
+            .execute(
+                PutDatabaseConfigurationAction.INSTANCE,
+                new PutDatabaseConfigurationAction.Request(
+                    TimeValue.MAX_VALUE,
+                    TimeValue.MAX_VALUE,
+                    new DatabaseConfiguration("test", databaseType, new DatabaseConfiguration.Maxmind("test_account"))
+                )
+            )
+            .actionGet();
+        ensureGreen(GeoIpDownloader.DATABASES_INDEX);
+        assertBusy(() -> {
+            SearchResponse searchResponse = client().search(new SearchRequest(GeoIpDownloader.DATABASES_INDEX)).actionGet();
+            try {
+                assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+            } finally {
+                searchResponse.decRef();
+            }
+        });
+    }
+
+    private void createGeoIpPipeline(String pipelineName, String databaseType, String sourceField, String targetField) throws IOException {
+        final BytesReference bytes;
+        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+            builder.startObject();
+            {
+                builder.field("description", "test");
+                builder.startArray("processors");
+                {
+                    builder.startObject();
+                    {
+                        builder.startObject("geoip");
+                        {
+                            builder.field("field", sourceField);
+                            builder.field("target_field", targetField);
+                            builder.field("database_file", databaseType + ".mmdb");
+                        }
+                        builder.endObject();
+                    }
+                    builder.endObject();
+                }
+                builder.endArray();
+            }
+            builder.endObject();
+            bytes = BytesReference.bytes(builder);
+        }
+        assertAcked(clusterAdmin().putPipeline(new PutPipelineRequest(pipelineName, bytes, XContentType.JSON)).actionGet());
+    }
+
+    private String ingestDocument(String indexName, String pipelineName, String sourceField) {
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(
+            new IndexRequest(indexName).source("{\"" + sourceField + "\": \"89.160.20.128\"}", XContentType.JSON).setPipeline(pipelineName)
+        );
+        BulkResponse response = client().bulk(bulkRequest).actionGet();
+        BulkItemResponse[] bulkItemResponses = response.getItems();
+        assertThat(bulkItemResponses.length, equalTo(1));
+        assertThat(bulkItemResponses[0].status(), equalTo(RestStatus.CREATED));
+        return bulkItemResponses[0].getId();
+    }
+}
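
For reference, the XContentBuilder calls in createGeoIpPipeline above assemble a pipeline definition equivalent to the following JSON, shown here as a Java text block with the values the test passes in (a readability aid, not code from this commit):

    // Equivalent JSON for the pipeline built by createGeoIpPipeline above.
    String pipelineJson = """
        {
          "description": "test",
          "processors": [
            {
              "geoip": {
                "field": "ip",
                "target_field": "ip-city",
                "database_file": "GeoIP2-City.mmdb"
              }
            }
          ]
        }
        """;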

+ 15 - 15
modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java

@@ -152,9 +152,9 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT {
         updateClusterSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true));
         assertBusy(() -> {
             GeoIpTaskState state = getGeoIpTaskState();
-            assertEquals(
-                Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"),
-                state.getDatabases().keySet()
+            assertThat(
+                state.getDatabases().keySet(),
+                containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb")
             );
         }, 2, TimeUnit.MINUTES);
 
@@ -227,9 +227,9 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT {
         updateClusterSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true));
         assertBusy(() -> {
             GeoIpTaskState state = getGeoIpTaskState();
-            assertEquals(
-                Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"),
-                state.getDatabases().keySet()
+            assertThat(
+                state.getDatabases().keySet(),
+                containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb")
             );
             putGeoIpPipeline(); // This is to work around the race condition described in #92888
         }, 2, TimeUnit.MINUTES);
@@ -238,9 +238,9 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT {
             assertBusy(() -> {
                 try {
                     GeoIpTaskState state = (GeoIpTaskState) getTask().getState();
-                    assertEquals(
-                        Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"),
-                        state.getDatabases().keySet()
+                    assertThat(
+                        state.getDatabases().keySet(),
+                        containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb")
                     );
                     GeoIpTaskState.Metadata metadata = state.getDatabases().get(id);
                     int size = metadata.lastChunk() - metadata.firstChunk() + 1;
@@ -301,9 +301,9 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT {
         assertNotNull(getTask().getState()); // removing all geoip processors should not result in the task being stopped
         assertBusy(() -> {
             GeoIpTaskState state = getGeoIpTaskState();
-            assertEquals(
-                Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"),
-                state.getDatabases().keySet()
+            assertThat(
+                state.getDatabases().keySet(),
+                containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb")
             );
         });
     }
@@ -337,9 +337,9 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT {
         assertAcked(indicesAdmin().prepareUpdateSettings(indexIdentifier).setSettings(indexSettings).get());
         assertBusy(() -> {
             GeoIpTaskState state = getGeoIpTaskState();
-            assertEquals(
-                Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"),
-                state.getDatabases().keySet()
+            assertThat(
+                state.getDatabases().keySet(),
+                containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb")
             );
         }, 2, TimeUnit.MINUTES);
 

+ 1 - 0
modules/ingest-geoip/src/main/java/module-info.java

@@ -15,5 +15,6 @@ module org.elasticsearch.ingest.geoip {
     requires com.maxmind.geoip2;
     requires com.maxmind.db;
 
+    exports org.elasticsearch.ingest.geoip.direct to org.elasticsearch.server;
     exports org.elasticsearch.ingest.geoip.stats to org.elasticsearch.server;
 }

+ 51 - 18
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java

@@ -24,6 +24,7 @@ import org.elasticsearch.common.logging.HeaderWarning;
 import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.core.CheckedRunnable;
 import org.elasticsearch.core.IOUtils;
+import org.elasticsearch.core.Tuple;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.index.Index;
@@ -52,7 +53,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Locale;
-import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -64,6 +64,7 @@ import java.util.stream.Stream;
 import java.util.zip.GZIPInputStream;
 
 import static org.elasticsearch.core.Strings.format;
+import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpTaskState.getEnterpriseGeoIpTaskState;
 import static org.elasticsearch.ingest.geoip.GeoIpTaskState.getGeoIpTaskState;
 
 /**
@@ -183,13 +184,14 @@ public final class DatabaseNodeService implements GeoIpDatabaseProvider, Closeab
         if (state == null) {
             return true;
         }
+
         GeoIpTaskState.Metadata metadata = state.getDatabases().get(databaseFile);
         // we never remove metadata from cluster state, if metadata is null we deal with built-in database, which is always valid
         if (metadata == null) {
             return true;
         }
 
-        boolean valid = metadata.isValid(currentState.metadata().settings());
+        boolean valid = metadata.isNewEnough(currentState.metadata().settings());
         if (valid && metadata.isCloseToExpiration()) {
             HeaderWarning.addWarning(
                 "database [{}] was not updated for over 25 days, geoip processor will stop working if there is no update for 30 days",
@@ -269,20 +271,52 @@ public final class DatabaseNodeService implements GeoIpDatabaseProvider, Closeab
             }
         }
 
-        GeoIpTaskState taskState = getGeoIpTaskState(state);
-        if (taskState == null) {
-            // Note: an empty state will purge stale entries in databases map
-            taskState = GeoIpTaskState.EMPTY;
+        // we'll consult each of the geoip downloaders to build up a list of database metadatas to work with
+        List<Tuple<String, GeoIpTaskState.Metadata>> validMetadatas = new ArrayList<>();
+
+        // process the geoip task state for the (ordinary) geoip downloader
+        {
+            GeoIpTaskState taskState = getGeoIpTaskState(state);
+            if (taskState == null) {
+                // Note: an empty state will purge stale entries in databases map
+                taskState = GeoIpTaskState.EMPTY;
+            }
+            validMetadatas.addAll(
+                taskState.getDatabases()
+                    .entrySet()
+                    .stream()
+                    .filter(e -> e.getValue().isNewEnough(state.getMetadata().settings()))
+                    .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue()))
+                    .toList()
+            );
+        }
+
+        // process the geoip task state for the enterprise geoip downloader
+        {
+            EnterpriseGeoIpTaskState taskState = getEnterpriseGeoIpTaskState(state);
+            if (taskState == null) {
+                // Note: an empty state will purge stale entries in databases map
+                taskState = EnterpriseGeoIpTaskState.EMPTY;
+            }
+            validMetadatas.addAll(
+                taskState.getDatabases()
+                    .entrySet()
+                    .stream()
+                    .filter(e -> e.getValue().isNewEnough(state.getMetadata().settings()))
+                    .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue()))
+                    .toList()
+            );
         }
 
-        taskState.getDatabases().entrySet().stream().filter(e -> e.getValue().isValid(state.getMetadata().settings())).forEach(e -> {
-            String name = e.getKey();
-            GeoIpTaskState.Metadata metadata = e.getValue();
+        // run through all the valid metadatas, regardless of source, and retrieve them
+        validMetadatas.forEach(e -> {
+            String name = e.v1();
+            GeoIpTaskState.Metadata metadata = e.v2();
             DatabaseReaderLazyLoader reference = databases.get(name);
             String remoteMd5 = metadata.md5();
             String localMd5 = reference != null ? reference.getMd5() : null;
             if (Objects.equals(localMd5, remoteMd5)) {
-                logger.debug("Current reference of [{}] is up to date [{}] with was recorded in CS [{}]", name, localMd5, remoteMd5);
+                logger.debug("[{}] is up to date [{}] with cluster state [{}]", name, localMd5, remoteMd5);
                 return;
             }
 
@@ -293,15 +327,14 @@ public final class DatabaseNodeService implements GeoIpDatabaseProvider, Closeab
             }
         });
 
+        // TODO perhaps we need to handle the license flap persistent task state better than we do
+        // i think the ideal end state is that we *do not* drop the files that the enterprise downloader
+        // handled if they fall out -- which means we need to track that in the databases map itself
+
+        // start with the list of all databases we currently know about in this service,
+        // then drop the ones that didn't check out as valid from the task states
         List<String> staleEntries = new ArrayList<>(databases.keySet());
-        staleEntries.removeAll(
-            taskState.getDatabases()
-                .entrySet()
-                .stream()
-                .filter(e -> e.getValue().isValid(state.getMetadata().settings()))
-                .map(Map.Entry::getKey)
-                .collect(Collectors.toSet())
-        );
+        staleEntries.removeAll(validMetadatas.stream().map(Tuple::v1).collect(Collectors.toSet()));
         removeStaleEntries(staleEntries);
     }
 

+ 474 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java

@@ -0,0 +1,474 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.hash.MessageDigests;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.RangeQueryBuilder;
+import org.elasticsearch.index.reindex.DeleteByQueryAction;
+import org.elasticsearch.index.reindex.DeleteByQueryRequest;
+import org.elasticsearch.ingest.geoip.GeoIpTaskState.Metadata;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata;
+import org.elasticsearch.persistent.AllocatedPersistentTask;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.threadpool.Scheduler;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xcontent.XContentType;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.PasswordAuthentication;
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloaderTaskExecutor.MAXMIND_SETTINGS_PREFIX;
+
+/**
+ * Main component responsible for downloading new GeoIP databases.
+ * New databases are downloaded in chunks and stored in the .geoip_databases index.
+ * Downloads are verified against a checksum provided by the server.
+ * The current state of all stored databases is kept in the cluster state as persistent task state.
+ */
+public class EnterpriseGeoIpDownloader extends AllocatedPersistentTask {
+
+    private static final Logger logger = LogManager.getLogger(EnterpriseGeoIpDownloader.class);
+    private static final Pattern CHECKSUM_PATTERN = Pattern.compile("(\\w{64})\\s\\s(.*)");
+
+    // for overriding in tests
+    static String DEFAULT_MAXMIND_ENDPOINT = System.getProperty(
+        MAXMIND_SETTINGS_PREFIX + "endpoint.default",
+        "https://download.maxmind.com/geoip/databases"
+    );
+    // n.b. a future enhancement might be to allow for a MAXMIND_ENDPOINT_SETTING, but
+    // at the moment this is an unsupported system property for use in tests (only)
+
+    static String downloadUrl(final String name, final String suffix) {
+        String endpointPattern = DEFAULT_MAXMIND_ENDPOINT;
+        if (endpointPattern.contains("%")) {
+            throw new IllegalArgumentException("Invalid endpoint [" + endpointPattern + "]");
+        }
+        if (endpointPattern.endsWith("/") == false) {
+            endpointPattern += "/";
+        }
+        endpointPattern += "%s/download?suffix=%s";
+
+        // at this point the pattern looks like this (in the default case):
+        // https://download.maxmind.com/geoip/databases/%s/download?suffix=%s
+
+        return Strings.format(endpointPattern, name, suffix);
+    }
+
+    static final String DATABASES_INDEX = ".geoip_databases";
+    static final int MAX_CHUNK_SIZE = 1024 * 1024;
+
+    private final Client client;
+    private final HttpClient httpClient;
+    private final ClusterService clusterService;
+    private final ThreadPool threadPool;
+
+    // visible for testing
+    protected volatile EnterpriseGeoIpTaskState state;
+    private volatile Scheduler.ScheduledCancellable scheduled;
+    private final Supplier<TimeValue> pollIntervalSupplier;
+    private final Function<String, HttpClient.PasswordAuthenticationHolder> credentialsBuilder;
+
+    EnterpriseGeoIpDownloader(
+        Client client,
+        HttpClient httpClient,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        long id,
+        String type,
+        String action,
+        String description,
+        TaskId parentTask,
+        Map<String, String> headers,
+        Supplier<TimeValue> pollIntervalSupplier,
+        Function<String, HttpClient.PasswordAuthenticationHolder> credentialsBuilder
+    ) {
+        super(id, type, action, description, parentTask, headers);
+        this.client = client;
+        this.httpClient = httpClient;
+        this.clusterService = clusterService;
+        this.threadPool = threadPool;
+        this.pollIntervalSupplier = pollIntervalSupplier;
+        this.credentialsBuilder = credentialsBuilder;
+    }
+
+    void setState(EnterpriseGeoIpTaskState state) {
+        // this is for injecting the state in EnterpriseGeoIpDownloaderTaskExecutor#nodeOperation just after the task instance has been
+        // created by the PersistentTasksNodeService -- since the EnterpriseGeoIpDownloader is newly created, the state will be null,
+        // and the passed-in state cannot be null
+        assert this.state == null
+            : "setState() cannot be called when state is already non-null. This most likely happened because setState() was called twice";
+        assert state != null : "Should never call setState with a null state. Pass an EnterpriseGeoIpTaskState.EMPTY instead.";
+        this.state = state;
+    }
+
+    // visible for testing
+    void updateDatabases() throws IOException {
+        var clusterState = clusterService.state();
+        var geoipIndex = clusterState.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX);
+        if (geoipIndex != null) {
+            logger.trace("the geoip index [{}] exists", EnterpriseGeoIpDownloader.DATABASES_INDEX);
+            if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) {
+                logger.debug("not updating databases because not all primary shards of [{}] index are active yet", DATABASES_INDEX);
+                return;
+            }
+            var blockException = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, geoipIndex.getWriteIndex().getName());
+            if (blockException != null) {
+                throw blockException;
+            }
+        }
+
+        logger.trace("Updating geoip databases");
+        IngestGeoIpMetadata geoIpMeta = clusterState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
+
+        // if there are entries in the cluster state that aren't in the persistent task state,
+        // then download those (only)
+        // ---
+        // if there are entries in the persistent task state that aren't in the cluster state,
+        // then nuke those (only)
+        // ---
+        // else, just download everything
+        boolean addedSomething = false;
+        {
+            Set<String> existingDatabaseNames = state.getDatabases().keySet();
+            for (Map.Entry<String, DatabaseConfigurationMetadata> entry : geoIpMeta.getDatabases().entrySet()) {
+                final String id = entry.getKey();
+                DatabaseConfiguration database = entry.getValue().database();
+                if (existingDatabaseNames.contains(database.name() + ".mmdb") == false) {
+                    logger.debug("A new database appeared [{}]", database.name());
+
+                    final String accountId = database.maxmind().accountId();
+                    try (HttpClient.PasswordAuthenticationHolder holder = credentialsBuilder.apply(accountId)) {
+                        if (holder == null) {
+                            logger.warn("No credentials found to download database [{}], skipping download...", id);
+                        } else {
+                            processDatabase(holder.get(), database);
+                            addedSomething = true;
+                        }
+                    }
+                }
+            }
+        }
+
+        boolean droppedSomething = false;
+        {
+            // rip anything out of the task state that doesn't match what's in the cluster state,
+            // that is, if there's no longer an entry for a database in the repository,
+            // then drop it from the task state, too
+            Set<String> databases = geoIpMeta.getDatabases()
+                .values()
+                .stream()
+                .map(c -> c.database().name() + ".mmdb")
+                .collect(Collectors.toSet());
+            EnterpriseGeoIpTaskState _state = state;
+            Collection<Tuple<String, Metadata>> metas = _state.getDatabases()
+                .entrySet()
+                .stream()
+                .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue()))
+                .toList();
+            for (Tuple<String, Metadata> metaTuple : metas) {
+                String name = metaTuple.v1();
+                Metadata meta = metaTuple.v2();
+                if (databases.contains(name) == false) {
+                    logger.debug("Dropping [{}], databases was {}", name, databases);
+                    _state = _state.remove(name);
+                    deleteOldChunks(name, meta.lastChunk() + 1);
+                    droppedSomething = true;
+                }
+            }
+            if (droppedSomething) {
+                state = _state;
+                updateTaskState();
+            }
+        }
+
+        if (addedSomething == false && droppedSomething == false) {
+            RuntimeException accumulator = null;
+            for (Map.Entry<String, DatabaseConfigurationMetadata> entry : geoIpMeta.getDatabases().entrySet()) {
+                final String id = entry.getKey();
+                DatabaseConfiguration database = entry.getValue().database();
+
+                final String accountId = database.maxmind().accountId();
+                try (HttpClient.PasswordAuthenticationHolder holder = credentialsBuilder.apply(accountId)) {
+                    if (holder == null) {
+                        logger.warn("No credentials found to download database [{}], skipping download...", id);
+                    } else {
+                        processDatabase(holder.get(), database);
+                    }
+                } catch (Exception e) {
+                    accumulator = ExceptionsHelper.useOrSuppress(accumulator, ExceptionsHelper.convertToRuntime(e));
+                }
+            }
+            if (accumulator != null) {
+                throw accumulator;
+            }
+        }
+    }
+
+    /**
+     * This method fetches the sha256 file and tar.gz file for the given database from the Maxmind endpoint, then indexes that tar.gz
+     * file into the .geoip_databases Elasticsearch index, deleting any old versions of the database tar.gz from the index if they exist.
+     * If the computed sha256 does not match the expected sha256, an error will be logged and the database will not be put into the
+     * Elasticsearch index.
+     * <p>
+     * As an implementation detail, this method retrieves the sha256 checksum of the database to download and then invokes
+     * {@link EnterpriseGeoIpDownloader#processDatabase(PasswordAuthentication, String, String, String)} with that checksum, deferring to
+     * that method to actually download and process the tar.gz itself.
+     *
+     * @param auth The credentials to use to download from the Maxmind endpoint
+     * @param database The database to be downloaded from Maxmind and indexed into an Elasticsearch index
+     * @throws IOException If there is an error fetching the sha256 file
+     */
+    void processDatabase(PasswordAuthentication auth, DatabaseConfiguration database) throws IOException {
+        final String name = database.name();
+        logger.debug("Processing database [{}] for configuration [{}]", name, database.id());
+
+        final String sha256Url = downloadUrl(name, "tar.gz.sha256");
+        final String tgzUrl = downloadUrl(name, "tar.gz");
+
+        String result = new String(httpClient.getBytes(auth, sha256Url), StandardCharsets.UTF_8).trim(); // this throws if the auth is bad
+        var matcher = CHECKSUM_PATTERN.matcher(result);
+        boolean match = matcher.matches();
+        if (match == false) {
+            throw new RuntimeException("Unexpected sha256 response from [" + sha256Url + "]");
+        }
+        final String sha256 = matcher.group(1);
+        // the name that comes from the enterprise downloader cluster state doesn't include the .mmdb extension,
+        // but the downloading and indexing of database code expects it to be there, so we add it on here before further processing
+        processDatabase(auth, name + ".mmdb", sha256, tgzUrl);
+    }
+
+    /**
+     * This method fetches the tar.gz file for the given database from the Maxmind endpoint, then indexes that tar.gz
+     * file into the .geoip_databases Elasticsearch index, deleting any old versions of the database tar.gz from the index if they exist.
+     *
+     * @param auth The credentials to use to download from the Maxmind endpoint
+     * @param name The name of the database to be downloaded from Maxmind and indexed into an Elasticsearch index
+     * @param sha256 The sha256 to compare to the computed sha256 of the downloaded tar.gz file
+     * @param url The URL for the Maxmind endpoint from which the database's tar.gz will be downloaded
+     */
+    private void processDatabase(PasswordAuthentication auth, String name, String sha256, String url) {
+        Metadata metadata = state.getDatabases().getOrDefault(name, Metadata.EMPTY);
+        if (Objects.equals(metadata.sha256(), sha256)) {
+            updateTimestamp(name, metadata);
+            return;
+        }
+        logger.debug("downloading geoip database [{}]", name);
+        long start = System.currentTimeMillis();
+        try (InputStream is = httpClient.get(auth, url)) {
+            int firstChunk = metadata.lastChunk() + 1; // if there is no metadata, then Metadata.EMPTY + 1 = 0
+            Tuple<Integer, String> tuple = indexChunks(name, is, firstChunk, MessageDigests.sha256(), sha256, start);
+            int lastChunk = tuple.v1();
+            String md5 = tuple.v2();
+            if (lastChunk > firstChunk) {
+                state = state.put(name, new Metadata(start, firstChunk, lastChunk - 1, md5, start, sha256));
+                updateTaskState();
+                logger.info("successfully downloaded geoip database [{}]", name);
+                deleteOldChunks(name, firstChunk);
+            }
+        } catch (Exception e) {
+            logger.error(() -> "error downloading geoip database [" + name + "]", e);
+        }
+    }
+
+    // visible for testing
+    void deleteOldChunks(String name, int firstChunk) {
+        BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(new MatchQueryBuilder("name", name))
+            .filter(new RangeQueryBuilder("chunk").to(firstChunk, false));
+        DeleteByQueryRequest request = new DeleteByQueryRequest();
+        request.indices(DATABASES_INDEX);
+        request.setQuery(queryBuilder);
+        client.execute(
+            DeleteByQueryAction.INSTANCE,
+            request,
+            ActionListener.wrap(r -> {}, e -> logger.warn("could not delete old chunks for geoip database [" + name + "]", e))
+        );
+    }
+
+    // visible for testing
+    protected void updateTimestamp(String name, Metadata old) {
+        logger.debug("geoip database [{}] is up to date, updated timestamp", name);
+        state = state.put(
+            name,
+            new Metadata(old.lastUpdate(), old.firstChunk(), old.lastChunk(), old.md5(), System.currentTimeMillis(), old.sha256())
+        );
+        updateTaskState();
+    }
+
+    void updateTaskState() {
+        PlainActionFuture<PersistentTask<?>> future = new PlainActionFuture<>();
+        updatePersistentTaskState(state, future);
+        state = ((EnterpriseGeoIpTaskState) future.actionGet().getState());
+    }
+
+    // visible for testing
+    Tuple<Integer, String> indexChunks(
+        String name,
+        InputStream is,
+        int chunk,
+        @Nullable MessageDigest digest,
+        String expectedChecksum,
+        long timestamp
+    ) throws IOException {
+        MessageDigest md5 = MessageDigests.md5();
+        for (byte[] buf = getChunk(is); buf.length != 0; buf = getChunk(is)) {
+            md5.update(buf);
+            if (digest != null) {
+                digest.update(buf);
+            }
+            IndexRequest indexRequest = new IndexRequest(DATABASES_INDEX).id(name + "_" + chunk + "_" + timestamp)
+                .create(true)
+                .source(XContentType.SMILE, "name", name, "chunk", chunk, "data", buf);
+            client.index(indexRequest).actionGet();
+            chunk++;
+        }
+
+        // May take some time before automatic flush kicks in:
+        // (otherwise the translog will contain large documents for some time without good reason)
+        FlushRequest flushRequest = new FlushRequest(DATABASES_INDEX);
+        client.admin().indices().flush(flushRequest).actionGet();
+        // Ensure that the chunk documents are visible:
+        RefreshRequest refreshRequest = new RefreshRequest(DATABASES_INDEX);
+        client.admin().indices().refresh(refreshRequest).actionGet();
+
+        String actualMd5 = MessageDigests.toHexString(md5.digest());
+        String actualChecksum = digest == null ? actualMd5 : MessageDigests.toHexString(digest.digest());
+        if (Objects.equals(expectedChecksum, actualChecksum) == false) {
+            throw new IOException("checksum mismatch, expected [" + expectedChecksum + "], actual [" + actualChecksum + "]");
+        }
+        return Tuple.tuple(chunk, actualMd5);
+    }
+
+    // visible for testing
+    static byte[] getChunk(InputStream is) throws IOException {
+        byte[] buf = new byte[MAX_CHUNK_SIZE];
+        int chunkSize = 0;
+        while (chunkSize < MAX_CHUNK_SIZE) {
+            int read = is.read(buf, chunkSize, MAX_CHUNK_SIZE - chunkSize);
+            if (read == -1) {
+                break;
+            }
+            chunkSize += read;
+        }
+        if (chunkSize < MAX_CHUNK_SIZE) {
+            buf = Arrays.copyOf(buf, chunkSize);
+        }
+        return buf;
+    }
+
+    /**
+     * Downloads the geoip databases now, and schedules them to be downloaded again after pollInterval.
+     */
+    synchronized void runDownloader() {
+        // by the time we reach here, the state will never be null
+        assert this.state != null : "this.state is null. You need to call setState() before calling runDownloader()";
+
+        // there's a race condition between here and requestReschedule. originally this scheduleNextRun call was at the end of this
+        // block, but remember that updateDatabases can take seconds to run (it's downloading bytes from the internet), and so during the
+        // very first run there would be no future run scheduled to reschedule in requestReschedule. which meant that if you went from zero
+        // to N(>=2) databases in quick succession, then all but the first database wouldn't necessarily get downloaded, because the
+        // requestReschedule call in the EnterpriseGeoIpDownloaderTaskExecutor's clusterChanged wouldn't have a scheduled future run to
+        // reschedule. scheduling the next run at the beginning of this run means that there's a much smaller window (milliseconds?, rather
+        // than seconds) in which such a race could occur. technically there's a window here, still, but i think it's _greatly_ reduced.
+        scheduleNextRun(pollIntervalSupplier.get());
+        // TODO regardless of the above comment, i like the idea of checking the lowest last-checked time and then running the math to get
+        // to the next interval from then -- maybe that's a neat future enhancement to add
+
+        if (isCancelled() || isCompleted()) {
+            return;
+        }
+        try {
+            updateDatabases(); // n.b. this downloads bytes from the internet, it can take a while
+        } catch (Exception e) {
+            logger.error("exception during geoip databases update", e);
+        }
+        try {
+            cleanDatabases();
+        } catch (Exception e) {
+            logger.error("exception during geoip databases cleanup", e);
+        }
+    }
+
+    /**
+     * This method requests that the downloader be rescheduled to run immediately (presumably because a dynamic property supplied by
+     * pollIntervalSupplier or eagerDownloadSupplier has changed, or a pipeline with a geoip processor has been added). This method does
+     * nothing if this task is cancelled, completed, or has not yet been scheduled to run for the first time. It cancels any existing
+     * scheduled run.
+     */
+    public void requestReschedule() {
+        if (isCancelled() || isCompleted()) {
+            return;
+        }
+        if (scheduled != null && scheduled.cancel()) {
+            scheduleNextRun(TimeValue.ZERO);
+        }
+    }
+
+    private void cleanDatabases() {
+        List<Tuple<String, Metadata>> expiredDatabases = state.getDatabases()
+            .entrySet()
+            .stream()
+            .filter(e -> e.getValue().isNewEnough(clusterService.state().metadata().settings()) == false)
+            .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue()))
+            .toList();
+        expiredDatabases.forEach(e -> {
+            String name = e.v1();
+            Metadata meta = e.v2();
+            deleteOldChunks(name, meta.lastChunk() + 1);
+            state = state.put(name, new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1));
+            updateTaskState();
+        });
+    }
+
+    @Override
+    protected void onCancelled() {
+        if (scheduled != null) {
+            scheduled.cancel();
+        }
+        markAsCompleted();
+    }
+
+    private void scheduleNextRun(TimeValue time) {
+        if (threadPool.scheduler().isShutdown() == false) {
+            scheduled = threadPool.schedule(this::runDownloader, time, threadPool.generic());
+        }
+    }
+
+}
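
To make the download flow concrete, here is a small illustration of what downloadUrl produces for the default endpoint, plus the sha256 response layout that CHECKSUM_PATTERN expects; the digest value below is fabricated for the example:

    // downloadUrl output with the default endpoint (derived from the method above):
    // downloadUrl("GeoIP2-City", "tar.gz.sha256")
    //   -> "https://download.maxmind.com/geoip/databases/GeoIP2-City/download?suffix=tar.gz.sha256"
    // downloadUrl("GeoIP2-City", "tar.gz")
    //   -> "https://download.maxmind.com/geoip/databases/GeoIP2-City/download?suffix=tar.gz"
    //
    // CHECKSUM_PATTERN ("(\\w{64})\\s\\s(.*)") matches the usual sha256sum layout: a 64-character
    // hex digest, two spaces, then a file name. A fabricated example of a matching response line:
    //   2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae  GeoIP2-City_20240101.tar.gz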

+ 257 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java

@@ -0,0 +1,257 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.client.internal.OriginSettingClient;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.SecureSetting;
+import org.elasticsearch.common.settings.SecureSettings;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.ingest.EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams;
+import org.elasticsearch.ingest.IngestService;
+import org.elasticsearch.persistent.AllocatedPersistentTask;
+import org.elasticsearch.persistent.PersistentTaskState;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksExecutor;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.GeneralSecurityException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER;
+import static org.elasticsearch.ingest.geoip.GeoIpDownloaderTaskExecutor.ENABLED_SETTING;
+import static org.elasticsearch.ingest.geoip.GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING;
+
+public class EnterpriseGeoIpDownloaderTaskExecutor extends PersistentTasksExecutor<EnterpriseGeoIpTaskParams>
+    implements
+        ClusterStateListener {
+    private static final Logger logger = LogManager.getLogger(EnterpriseGeoIpDownloader.class);
+
+    static final String MAXMIND_SETTINGS_PREFIX = "ingest.geoip.downloader.maxmind.";
+
+    public static final Setting<SecureString> MAXMIND_LICENSE_KEY_SETTING = SecureSetting.secureString(
+        MAXMIND_SETTINGS_PREFIX + "license_key",
+        null
+    );
+
+    private final Client client;
+    private final HttpClient httpClient;
+    private final ClusterService clusterService;
+    private final ThreadPool threadPool;
+    private final Settings settings;
+    private volatile TimeValue pollInterval;
+    private final AtomicReference<EnterpriseGeoIpDownloader> currentTask = new AtomicReference<>();
+
+    private volatile SecureSettings cachedSecureSettings;
+
+    EnterpriseGeoIpDownloaderTaskExecutor(Client client, HttpClient httpClient, ClusterService clusterService, ThreadPool threadPool) {
+        super(ENTERPRISE_GEOIP_DOWNLOADER, threadPool.generic());
+        this.client = new OriginSettingClient(client, IngestService.INGEST_ORIGIN);
+        this.httpClient = httpClient;
+        this.clusterService = clusterService;
+        this.threadPool = threadPool;
+        this.settings = clusterService.getSettings();
+        this.pollInterval = POLL_INTERVAL_SETTING.get(settings);
+
+        // do an initial load using the node settings
+        reload(clusterService.getSettings());
+    }
+
+    /**
+     * This method completes the initialization of the EnterpriseGeoIpDownloaderTaskExecutor by registering several listeners.
+     */
+    public void init() {
+        clusterService.addListener(this);
+        clusterService.getClusterSettings().addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, this::setPollInterval);
+    }
+
+    private void setPollInterval(TimeValue pollInterval) {
+        if (Objects.equals(this.pollInterval, pollInterval) == false) {
+            this.pollInterval = pollInterval;
+            EnterpriseGeoIpDownloader currentDownloader = getCurrentTask();
+            if (currentDownloader != null) {
+                currentDownloader.requestReschedule();
+            }
+        }
+    }
+
+    private HttpClient.PasswordAuthenticationHolder buildCredentials(final String username) {
+        final char[] passwordChars;
+        if (cachedSecureSettings.getSettingNames().contains(MAXMIND_LICENSE_KEY_SETTING.getKey())) {
+            passwordChars = cachedSecureSettings.getString(MAXMIND_LICENSE_KEY_SETTING.getKey()).getChars();
+        } else {
+            passwordChars = null;
+        }
+
+        // if the username is missing, empty, or blank, return null as 'no auth'
+        if (username == null || username.isEmpty() || username.isBlank()) {
+            return null;
+        }
+
+        // likewise if the password chars array is missing or empty, return null as 'no auth'
+        if (passwordChars == null || passwordChars.length == 0) {
+            return null;
+        }
+
+        return new HttpClient.PasswordAuthenticationHolder(username, passwordChars);
+    }
+
+    @Override
+    protected EnterpriseGeoIpDownloader createTask(
+        long id,
+        String type,
+        String action,
+        TaskId parentTaskId,
+        PersistentTasksCustomMetadata.PersistentTask<EnterpriseGeoIpTaskParams> taskInProgress,
+        Map<String, String> headers
+    ) {
+        return new EnterpriseGeoIpDownloader(
+            client,
+            httpClient,
+            clusterService,
+            threadPool,
+            id,
+            type,
+            action,
+            getDescription(taskInProgress),
+            parentTaskId,
+            headers,
+            () -> pollInterval,
+            this::buildCredentials
+        );
+    }
+
+    @Override
+    protected void nodeOperation(AllocatedPersistentTask task, EnterpriseGeoIpTaskParams params, PersistentTaskState state) {
+        EnterpriseGeoIpDownloader downloader = (EnterpriseGeoIpDownloader) task;
+        EnterpriseGeoIpTaskState geoIpTaskState = (state == null) ? EnterpriseGeoIpTaskState.EMPTY : (EnterpriseGeoIpTaskState) state;
+        downloader.setState(geoIpTaskState);
+        currentTask.set(downloader);
+        if (ENABLED_SETTING.get(clusterService.state().metadata().settings(), settings)) {
+            downloader.runDownloader();
+        }
+    }
+
+    public EnterpriseGeoIpDownloader getCurrentTask() {
+        return currentTask.get();
+    }
+
+    @Override
+    public void clusterChanged(ClusterChangedEvent event) {
+        EnterpriseGeoIpDownloader currentDownloader = getCurrentTask();
+        if (currentDownloader != null) {
+            boolean hasGeoIpMetadataChanges = event.metadataChanged()
+                && event.changedCustomMetadataSet().contains(IngestGeoIpMetadata.TYPE);
+            if (hasGeoIpMetadataChanges) {
+                currentDownloader.requestReschedule(); // watch cluster changed events to kick off the downloader if it isn't running
+            }
+        }
+    }
+
+    public synchronized void reload(Settings settings) {
+        // `SecureSettings` are available here! cache them as they will be needed
+        // whenever dynamic cluster settings change and we have to rebuild the credentials
+        try {
+            this.cachedSecureSettings = extractSecureSettings(settings, List.of(MAXMIND_LICENSE_KEY_SETTING));
+        } catch (GeneralSecurityException e) {
+            // rethrow as a runtime exception, there's logging higher up the call chain around ReloadablePlugin
+            throw new ElasticsearchException("Exception while reloading enterprise geoip download task executor", e);
+        }
+    }
+
+    /**
+     * Extracts the {@link SecureSettings} out of the passed-in {@link Settings} object. The {@code Settings} argument has to have
+     * the {@code SecureSettings} open/available. Normally {@code SecureSettings} are available only under specific callstacks
+     * (e.g. during node initialization or during a {@code reload} call). The returned copy can be reused freely as it will never be
+     * closed (this is a bit of a cheat, but it is necessary in this specific circumstance). Only works for secure settings of type
+     * string (not file).
+     *
+     * @param source               A {@code Settings} object with its {@code SecureSettings} open/available.
+     * @param securePluginSettings The list of settings to copy.
+     * @return A copy of the {@code SecureSettings} of the passed in {@code Settings} argument.
+     */
+    private static SecureSettings extractSecureSettings(Settings source, List<Setting<?>> securePluginSettings)
+        throws GeneralSecurityException {
+        // get the secure settings out
+        final SecureSettings sourceSecureSettings = Settings.builder().put(source, true).getSecureSettings();
+        // filter and cache them...
+        final Map<String, SecureSettingValue> innerMap = new HashMap<>();
+        if (sourceSecureSettings != null && securePluginSettings != null) {
+            for (final String settingKey : sourceSecureSettings.getSettingNames()) {
+                for (final Setting<?> secureSetting : securePluginSettings) {
+                    if (secureSetting.match(settingKey)) {
+                        innerMap.put(
+                            settingKey,
+                            new SecureSettingValue(
+                                sourceSecureSettings.getString(settingKey),
+                                sourceSecureSettings.getSHA256Digest(settingKey)
+                            )
+                        );
+                    }
+                }
+            }
+        }
+        return new SecureSettings() {
+            @Override
+            public boolean isLoaded() {
+                return true;
+            }
+
+            @Override
+            public SecureString getString(String setting) {
+                return innerMap.get(setting).value();
+            }
+
+            @Override
+            public Set<String> getSettingNames() {
+                return innerMap.keySet();
+            }
+
+            @Override
+            public InputStream getFile(String setting) {
+                throw new UnsupportedOperationException("A cached SecureSetting cannot be a file");
+            }
+
+            @Override
+            public byte[] getSHA256Digest(String setting) {
+                return innerMap.get(setting).sha256Digest();
+            }
+
+            @Override
+            public void close() throws IOException {}
+
+            @Override
+            public void writeTo(StreamOutput out) throws IOException {
+                throw new UnsupportedOperationException("A cached SecureSetting cannot be serialized");
+            }
+        };
+    }
+
+    /**
+     * A single-purpose record for the internal implementation of extractSecureSettings
+     */
+    private record SecureSettingValue(SecureString value, byte[] sha256Digest) {}
+}
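
A minimal, self-contained sketch of the reload-and-cache pattern above, in plain Java (all names here are hypothetical, not the Elasticsearch API): secrets are readable only while the store is open, so reload() copies out the values that later credential-building calls will need.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class SecretCacheSketch {
    private final Map<String, char[]> cache = new ConcurrentHashMap<>();

    // called while the secret store is open (node startup or an explicit reload)
    synchronized void reload(Map<String, char[]> openSecrets) {
        cache.clear();
        openSecrets.forEach((key, chars) -> cache.put(key, chars.clone())); // defensive copies
    }

    // called later, when the original store may already be closed
    char[] lookup(String key) {
        char[] chars = cache.get(key);
        return chars == null ? null : chars.clone();
    }
}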

+ 153 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java

@@ -0,0 +1,153 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.VersionedNamedWriteable;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.ingest.EnterpriseGeoIpTask;
+import org.elasticsearch.ingest.geoip.GeoIpTaskState.Metadata;
+import org.elasticsearch.persistent.PersistentTaskState;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER;
+import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+class EnterpriseGeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
+
+    private static final ParseField DATABASES = new ParseField("databases");
+
+    static final EnterpriseGeoIpTaskState EMPTY = new EnterpriseGeoIpTaskState(Map.of());
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<EnterpriseGeoIpTaskState, Void> PARSER = new ConstructingObjectParser<>(
+        GEOIP_DOWNLOADER,
+        true,
+        args -> {
+            List<Tuple<String, Metadata>> databases = (List<Tuple<String, Metadata>>) args[0];
+            return new EnterpriseGeoIpTaskState(databases.stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)));
+        }
+    );
+
+    static {
+        PARSER.declareNamedObjects(constructorArg(), (p, c, name) -> Tuple.tuple(name, Metadata.fromXContent(p)), DATABASES);
+    }
+
+    public static EnterpriseGeoIpTaskState fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    private final Map<String, Metadata> databases;
+
+    EnterpriseGeoIpTaskState(Map<String, Metadata> databases) {
+        this.databases = Map.copyOf(databases);
+    }
+
+    EnterpriseGeoIpTaskState(StreamInput input) throws IOException {
+        databases = input.readImmutableMap(
+            in -> new Metadata(in.readLong(), in.readVInt(), in.readVInt(), in.readString(), in.readLong(), in.readOptionalString())
+        );
+    }
+
+    public EnterpriseGeoIpTaskState put(String name, Metadata metadata) {
+        HashMap<String, Metadata> newDatabases = new HashMap<>(databases);
+        newDatabases.put(name, metadata);
+        return new EnterpriseGeoIpTaskState(newDatabases);
+    }
+
+    public EnterpriseGeoIpTaskState remove(String name) {
+        HashMap<String, Metadata> newDatabases = new HashMap<>(databases);
+        newDatabases.remove(name);
+        return new EnterpriseGeoIpTaskState(newDatabases);
+    }
+
+    public Map<String, Metadata> getDatabases() {
+        return databases;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        EnterpriseGeoIpTaskState that = (EnterpriseGeoIpTaskState) o;
+        return databases.equals(that.databases);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(databases);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.startObject("databases");
+            for (Map.Entry<String, Metadata> e : databases.entrySet()) {
+                builder.field(e.getKey(), e.getValue());
+            }
+            builder.endObject();
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public String getWriteableName() {
+        return "enterprise-geoip-downloader";
+    }
+
+    @Override
+    public TransportVersion getMinimalSupportedVersion() {
+        return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeMap(databases, (o, v) -> {
+            o.writeLong(v.lastUpdate());
+            o.writeVInt(v.firstChunk());
+            o.writeVInt(v.lastChunk());
+            o.writeString(v.md5());
+            o.writeLong(v.lastCheck());
+            o.writeOptionalString(v.sha256());
+        });
+    }
+
+    /**
+     * Retrieves the enterprise geoip downloader's task state from the cluster state. This may return null in some circumstances,
+     * for example if the enterprise geoip downloader task hasn't been created yet (which it wouldn't be if it's disabled).
+     *
+     * @param state the cluster state to read the task state from
+     * @return the enterprise geoip downloader's task state, or null if there is no state to read
+     */
+    @Nullable
+    static EnterpriseGeoIpTaskState getEnterpriseGeoIpTaskState(ClusterState state) {
+        PersistentTasksCustomMetadata.PersistentTask<?> task = getTaskWithId(state, EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER);
+        return (task == null) ? null : (EnterpriseGeoIpTaskState) task.getState();
+    }
+
+}
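
Note the persistence style here: put and remove never mutate in place; each returns a fresh immutable instance. A tiny sketch of the same copy-on-write idiom (hypothetical names, plain Java):

import java.util.HashMap;
import java.util.Map;

record StateSketch(Map<String, String> databases) {
    StateSketch {
        databases = Map.copyOf(databases); // freeze on construction
    }

    StateSketch put(String name, String metadata) {
        Map<String, String> copy = new HashMap<>(databases);
        copy.put(name, metadata);
        return new StateSketch(copy); // the old instance is untouched
    }
}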

+ 6 - 4
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java

@@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.MatchQueryBuilder;
 import org.elasticsearch.index.query.RangeQueryBuilder;
@@ -318,14 +319,15 @@ public class GeoIpDownloader extends AllocatedPersistentTask {
     }
 
     private void cleanDatabases() {
-        List<Map.Entry<String, Metadata>> expiredDatabases = state.getDatabases()
+        List<Tuple<String, Metadata>> expiredDatabases = state.getDatabases()
             .entrySet()
             .stream()
-            .filter(e -> e.getValue().isValid(clusterService.state().metadata().settings()) == false)
+            .filter(e -> e.getValue().isNewEnough(clusterService.state().metadata().settings()) == false)
+            .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue()))
             .toList();
         expiredDatabases.forEach(e -> {
-            String name = e.getKey();
-            Metadata meta = e.getValue();
+            String name = e.v1();
+            Metadata meta = e.v2();
             deleteOldChunks(name, meta.lastChunk() + 1);
             state = state.put(name, new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1));
             updateTaskState();
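
For reference, org.elasticsearch.core.Tuple is a simple immutable pair type; a usage sketch (relying on imports already present in this file):

Tuple<String, Metadata> entry = Tuple.tuple("GeoLite2-City.mmdb", Metadata.EMPTY);
String name = entry.v1();   // the database name
Metadata meta = entry.v2(); // its metadata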

+ 1 - 1
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java

@@ -217,7 +217,7 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor<G
         }
 
         boolean hasIndicesChanges = event.previousState().metadata().indices().equals(event.state().metadata().indices()) == false;
-        boolean hasIngestPipelineChanges = event.changedCustomMetadataSet().contains(IngestMetadata.TYPE);
+        boolean hasIngestPipelineChanges = event.metadataChanged() && event.changedCustomMetadataSet().contains(IngestMetadata.TYPE);
 
         if (hasIngestPipelineChanges || hasIndicesChanges) {
             boolean newAtLeastOneGeoipProcessor = hasAtLeastOneGeoipProcessor(event.state());

+ 34 - 5
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java

@@ -42,6 +42,11 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstr
 
 class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
 
+    // for clarity inside this file, it's useful to have an alias that reads like what we're using it for
+    // rather than what the version is -- previously this was two separate conceptual versions, but it's not
+    // especially useful to make that distinction in the TransportVersions class itself
+    private static final TransportVersion INCLUDE_SHA256 = TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+
     private static final ParseField DATABASES = new ParseField("databases");
 
     static final GeoIpTaskState EMPTY = new GeoIpTaskState(Map.of());
@@ -71,7 +76,16 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
     }
 
     GeoIpTaskState(StreamInput input) throws IOException {
-        databases = input.readImmutableMap(in -> new Metadata(in.readLong(), in.readVInt(), in.readVInt(), in.readString(), in.readLong()));
+        databases = input.readImmutableMap(
+            in -> new Metadata(
+                in.readLong(),
+                in.readVInt(),
+                in.readVInt(),
+                in.readString(),
+                in.readLong(),
+                in.getTransportVersion().onOrAfter(INCLUDE_SHA256) ? input.readOptionalString() : null
+            )
+        );
     }
 
     public GeoIpTaskState put(String name, Metadata metadata) {
@@ -129,16 +143,21 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
             o.writeVInt(v.lastChunk);
             o.writeString(v.md5);
             o.writeLong(v.lastCheck);
+            if (o.getTransportVersion().onOrAfter(INCLUDE_SHA256)) {
+                o.writeOptionalString(v.sha256);
+            }
         });
     }
 
-    record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck) implements ToXContentObject {
+    record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck, @Nullable String sha256)
+        implements
+            ToXContentObject {
 
         /**
          * An empty Metadata object useful for getOrDefault-type calls. Crucially, the 'lastChunk' is -1, so it's safe to use
          * with logic that says the new firstChunk is the old lastChunk + 1.
          */
-        static Metadata EMPTY = new Metadata(-1, -1, -1, "", -1);
+        static Metadata EMPTY = new Metadata(-1, -1, -1, "", -1, null);
 
         private static final String NAME = GEOIP_DOWNLOADER + "-metadata";
         private static final ParseField LAST_CHECK = new ParseField("last_check");
@@ -146,6 +165,7 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
         private static final ParseField FIRST_CHUNK = new ParseField("first_chunk");
         private static final ParseField LAST_CHUNK = new ParseField("last_chunk");
         private static final ParseField MD5 = new ParseField("md5");
+        private static final ParseField SHA256 = new ParseField("sha256");
 
         private static final ConstructingObjectParser<Metadata, Void> PARSER = new ConstructingObjectParser<>(
             NAME,
@@ -155,7 +175,8 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
                 (int) args[1],
                 (int) args[2],
                 (String) args[3],
-                (long) (args[4] == null ? args[0] : args[4])
+                (long) (args[4] == null ? args[0] : args[4]),
+                (String) args[5]
             )
         );
 
@@ -165,6 +186,7 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
             PARSER.declareInt(constructorArg(), LAST_CHUNK);
             PARSER.declareString(constructorArg(), MD5);
             PARSER.declareLong(optionalConstructorArg(), LAST_CHECK);
+            PARSER.declareString(optionalConstructorArg(), SHA256);
         }
 
         public static Metadata fromXContent(XContentParser parser) {
@@ -179,11 +201,15 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
             Objects.requireNonNull(md5);
         }
 
+        Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck) {
+            this(lastUpdate, firstChunk, lastChunk, md5, lastCheck, null);
+        }
+
         public boolean isCloseToExpiration() {
             return Instant.ofEpochMilli(lastCheck).isBefore(Instant.now().minus(25, ChronoUnit.DAYS));
         }
 
-        public boolean isValid(Settings settings) {
+        public boolean isNewEnough(Settings settings) {
             TimeValue valid = settings.getAsTime("ingest.geoip.database_validity", TimeValue.timeValueDays(30));
             return Instant.ofEpochMilli(lastCheck).isAfter(Instant.now().minus(valid.getMillis(), ChronoUnit.MILLIS));
         }
@@ -197,6 +223,9 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable {
                 builder.field(FIRST_CHUNK.getPreferredName(), firstChunk);
                 builder.field(LAST_CHUNK.getPreferredName(), lastChunk);
                 builder.field(MD5.getPreferredName(), md5);
+                if (sha256 != null) { // only serialize if not null, for prettiness reasons
+                    builder.field(SHA256.getPreferredName(), sha256);
+                }
             }
             builder.endObject();
             return builder;
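
The rename from isValid to isNewEnough makes the semantics explicit: this is purely a freshness check against the configured validity window, not a general validity check. A worked example, assuming the default 30-day ingest.geoip.database_validity:

long now = System.currentTimeMillis();
Settings settings = Settings.EMPTY; // falls back to the 30-day default
Metadata fresh = new Metadata(now, 0, 10, "someMd5", now - TimeValue.timeValueDays(1).getMillis());
Metadata stale = new Metadata(now, 0, 10, "someMd5", now - TimeValue.timeValueDays(31).getMillis());
assert fresh.isNewEnough(settings);          // last checked 1 day ago: inside the window
assert stale.isNewEnough(settings) == false; // last checked 31 days ago: too old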

+ 26 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java

@@ -24,6 +24,7 @@ import java.net.URL;
 import java.security.AccessController;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.Objects;
 
 import static java.net.HttpURLConnection.HTTP_MOVED_PERM;
@@ -34,6 +35,31 @@ import static java.net.HttpURLConnection.HTTP_SEE_OTHER;
 
 class HttpClient {
 
+    /**
+     * A PasswordAuthenticationHolder is just a wrapper around a PasswordAuthentication that implements AutoCloseable.
+     * This makes it possible to use a PasswordAuthentication in a try-with-resources statement, which in turn makes it
+     * easier to ensure that the password is cleared once it is no longer needed.
+     */
+    static final class PasswordAuthenticationHolder implements AutoCloseable {
+        private PasswordAuthentication auth;
+
+        PasswordAuthenticationHolder(String username, char[] passwordChars) {
+            this.auth = new PasswordAuthentication(username, passwordChars); // clones the passed-in chars
+        }
+
+        public PasswordAuthentication get() {
+            Objects.requireNonNull(auth);
+            return auth;
+        }
+
+        @Override
+        public void close() {
+            final PasswordAuthentication clear = this.auth;
+            this.auth = null; // null out the field, then zero the chars it held
+            Arrays.fill(clear.getPassword(), '\0'); // zero out the password chars
+        }
+    }
+
     // a private sentinel value for representing the idea that there's no auth for some request.
     // this allows us to have a not-null requirement on the methods that do accept an auth.
     // if you don't want auth, then don't use those methods. ;)
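
A usage sketch for the holder (the account id and license key are illustrative): the try-with-resources block guarantees that close() zeroes the holder's password chars even if the request fails.

char[] licenseKeyChars = "0123456789abcdef".toCharArray(); // illustrative only
try (HttpClient.PasswordAuthenticationHolder auth =
         new HttpClient.PasswordAuthenticationHolder("my_account_id", licenseKeyChars)) {
    PasswordAuthentication credentials = auth.get();
    // ... perform the authenticated download using 'credentials' ...
} // the holder's cloned password chars have now been zeroed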

+ 157 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java

@@ -0,0 +1,157 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.DiffableUtils;
+import org.elasticsearch.cluster.NamedDiff;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.common.collect.Iterators;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Holds the ingest-geoip database configurations that are available in the cluster state.
+ */
+public final class IngestGeoIpMetadata implements Metadata.Custom {
+
+    public static final String TYPE = "ingest_geoip";
+    private static final ParseField DATABASES_FIELD = new ParseField("databases");
+
+    public static final IngestGeoIpMetadata EMPTY = new IngestGeoIpMetadata(Map.of());
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<IngestGeoIpMetadata, Void> PARSER = new ConstructingObjectParser<>(
+        "ingest_geoip_metadata",
+        a -> new IngestGeoIpMetadata(
+            ((List<DatabaseConfigurationMetadata>) a[0]).stream().collect(Collectors.toMap((m) -> m.database().id(), Function.identity()))
+        )
+    );
+    static {
+        PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> DatabaseConfigurationMetadata.parse(p, n), v -> {
+            throw new IllegalArgumentException("ordered " + DATABASES_FIELD.getPreferredName() + " are not supported");
+        }, DATABASES_FIELD);
+    }
+
+    private final Map<String, DatabaseConfigurationMetadata> databases;
+
+    public IngestGeoIpMetadata(Map<String, DatabaseConfigurationMetadata> databases) {
+        this.databases = Map.copyOf(databases);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return TYPE;
+    }
+
+    @Override
+    public TransportVersion getMinimalSupportedVersion() {
+        return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+    }
+
+    public Map<String, DatabaseConfigurationMetadata> getDatabases() {
+        return databases;
+    }
+
+    public IngestGeoIpMetadata(StreamInput in) throws IOException {
+        this.databases = in.readMap(StreamInput::readString, DatabaseConfigurationMetadata::new);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeMap(databases, StreamOutput::writeWriteable);
+    }
+
+    public static IngestGeoIpMetadata fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
+        return Iterators.concat(ChunkedToXContentHelper.xContentValuesMap(DATABASES_FIELD.getPreferredName(), databases));
+    }
+
+    @Override
+    public EnumSet<Metadata.XContentContext> context() {
+        return Metadata.ALL_CONTEXTS;
+    }
+
+    @Override
+    public Diff<Metadata.Custom> diff(Metadata.Custom before) {
+        return new GeoIpMetadataDiff((IngestGeoIpMetadata) before, this);
+    }
+
+    static class GeoIpMetadataDiff implements NamedDiff<Metadata.Custom> {
+
+        final Diff<Map<String, DatabaseConfigurationMetadata>> databases;
+
+        GeoIpMetadataDiff(IngestGeoIpMetadata before, IngestGeoIpMetadata after) {
+            this.databases = DiffableUtils.diff(before.databases, after.databases, DiffableUtils.getStringKeySerializer());
+        }
+
+        GeoIpMetadataDiff(StreamInput in) throws IOException {
+            databases = DiffableUtils.readJdkMapDiff(
+                in,
+                DiffableUtils.getStringKeySerializer(),
+                DatabaseConfigurationMetadata::new,
+                DatabaseConfigurationMetadata::readDiffFrom
+            );
+        }
+
+        @Override
+        public Metadata.Custom apply(Metadata.Custom part) {
+            return new IngestGeoIpMetadata(databases.apply(((IngestGeoIpMetadata) part).databases));
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            databases.writeTo(out);
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public TransportVersion getMinimalSupportedVersion() {
+            return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+        }
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        IngestGeoIpMetadata that = (IngestGeoIpMetadata) o;
+        return Objects.equals(databases, that.databases);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(databases);
+    }
+}
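
Putting the pieces together, a sketch of how one entry of this custom metadata is assembled (the id and account are illustrative; see DatabaseConfiguration and DatabaseConfigurationMetadata below):

DatabaseConfiguration config = new DatabaseConfiguration(
    "my_db_config_1", "GeoIP2-Domain", new DatabaseConfiguration.Maxmind("my_account_id"));
DatabaseConfigurationMetadata meta =
    new DatabaseConfigurationMetadata(config, 1, System.currentTimeMillis());
IngestGeoIpMetadata custom = new IngestGeoIpMetadata(Map.of(config.id(), meta));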

+ 68 - 7
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java

@@ -12,8 +12,10 @@ import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.NamedDiff;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -25,8 +27,18 @@ import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.indices.SystemIndexDescriptor;
+import org.elasticsearch.ingest.EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams;
 import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.ingest.Processor;
+import org.elasticsearch.ingest.geoip.direct.DeleteDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.GetDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.RestDeleteDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.RestGetDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.RestPutDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.TransportDeleteDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.TransportGetDatabaseConfigurationAction;
+import org.elasticsearch.ingest.geoip.direct.TransportPutDatabaseConfigurationAction;
 import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStats;
 import org.elasticsearch.ingest.geoip.stats.GeoIpStatsAction;
 import org.elasticsearch.ingest.geoip.stats.GeoIpStatsTransportAction;
@@ -38,6 +50,7 @@ import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.IngestPlugin;
 import org.elasticsearch.plugins.PersistentTaskPlugin;
 import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.ReloadablePlugin;
 import org.elasticsearch.plugins.SystemIndexPlugin;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestHandler;
@@ -57,13 +70,21 @@ import java.util.function.Predicate;
 import java.util.function.Supplier;
 
 import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME;
+import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER;
 import static org.elasticsearch.ingest.IngestService.INGEST_ORIGIN;
 import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX;
 import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX_PATTERN;
 import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 
-public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemIndexPlugin, Closeable, PersistentTaskPlugin, ActionPlugin {
+public class IngestGeoIpPlugin extends Plugin
+    implements
+        IngestPlugin,
+        SystemIndexPlugin,
+        Closeable,
+        PersistentTaskPlugin,
+        ActionPlugin,
+        ReloadablePlugin {
     public static final Setting<Long> CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope);
     private static final int GEOIP_INDEX_MAPPINGS_VERSION = 1;
     /**
@@ -78,6 +99,7 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd
     private final SetOnce<IngestService> ingestService = new SetOnce<>();
     private final SetOnce<DatabaseNodeService> databaseRegistry = new SetOnce<>();
     private GeoIpDownloaderTaskExecutor geoIpDownloaderTaskExecutor;
+    private EnterpriseGeoIpDownloaderTaskExecutor enterpriseGeoIpDownloaderTaskExecutor;
 
     @Override
     public List<Setting<?>> getSettings() {
@@ -86,7 +108,8 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd
             GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING,
             GeoIpDownloaderTaskExecutor.ENABLED_SETTING,
             GeoIpDownloader.ENDPOINT_SETTING,
-            GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING
+            GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING,
+            EnterpriseGeoIpDownloaderTaskExecutor.MAXMIND_LICENSE_KEY_SETTING
         );
     }
 
@@ -123,7 +146,16 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd
             services.threadPool()
         );
         geoIpDownloaderTaskExecutor.init();
-        return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor);
+
+        enterpriseGeoIpDownloaderTaskExecutor = new EnterpriseGeoIpDownloaderTaskExecutor(
+            services.client(),
+            new HttpClient(),
+            services.clusterService(),
+            services.threadPool()
+        );
+        enterpriseGeoIpDownloaderTaskExecutor.init();
+
+        return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor, enterpriseGeoIpDownloaderTaskExecutor);
     }
 
     @Override
@@ -139,12 +171,17 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd
         SettingsModule settingsModule,
         IndexNameExpressionResolver expressionResolver
     ) {
-        return List.of(geoIpDownloaderTaskExecutor);
+        return List.of(geoIpDownloaderTaskExecutor, enterpriseGeoIpDownloaderTaskExecutor);
     }
 
     @Override
     public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
-        return List.of(new ActionHandler<>(GeoIpStatsAction.INSTANCE, GeoIpStatsTransportAction.class));
+        return List.of(
+            new ActionHandler<>(GeoIpStatsAction.INSTANCE, GeoIpStatsTransportAction.class),
+            new ActionHandler<>(GetDatabaseConfigurationAction.INSTANCE, TransportGetDatabaseConfigurationAction.class),
+            new ActionHandler<>(DeleteDatabaseConfigurationAction.INSTANCE, TransportDeleteDatabaseConfigurationAction.class),
+            new ActionHandler<>(PutDatabaseConfigurationAction.INSTANCE, TransportPutDatabaseConfigurationAction.class)
+        );
     }
 
     @Override
@@ -159,22 +196,41 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd
         Supplier<DiscoveryNodes> nodesInCluster,
         Predicate<NodeFeature> clusterSupportsFeature
     ) {
-        return List.of(new RestGeoIpStatsAction());
+        return List.of(
+            new RestGeoIpStatsAction(),
+            new RestGetDatabaseConfigurationAction(),
+            new RestDeleteDatabaseConfigurationAction(),
+            new RestPutDatabaseConfigurationAction()
+        );
     }
 
     @Override
     public List<NamedXContentRegistry.Entry> getNamedXContent() {
         return List.of(
             new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(GEOIP_DOWNLOADER), GeoIpTaskParams::fromXContent),
-            new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(GEOIP_DOWNLOADER), GeoIpTaskState::fromXContent)
+            new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(GEOIP_DOWNLOADER), GeoIpTaskState::fromXContent),
+            new NamedXContentRegistry.Entry(
+                PersistentTaskParams.class,
+                new ParseField(ENTERPRISE_GEOIP_DOWNLOADER),
+                EnterpriseGeoIpTaskParams::fromXContent
+            ),
+            new NamedXContentRegistry.Entry(
+                PersistentTaskState.class,
+                new ParseField(ENTERPRISE_GEOIP_DOWNLOADER),
+                EnterpriseGeoIpTaskState::fromXContent
+            )
         );
     }
 
     @Override
     public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
         return List.of(
+            new NamedWriteableRegistry.Entry(Metadata.Custom.class, IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata::new),
+            new NamedWriteableRegistry.Entry(NamedDiff.class, IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.GeoIpMetadataDiff::new),
             new NamedWriteableRegistry.Entry(PersistentTaskState.class, GEOIP_DOWNLOADER, GeoIpTaskState::new),
             new NamedWriteableRegistry.Entry(PersistentTaskParams.class, GEOIP_DOWNLOADER, GeoIpTaskParams::new),
+            new NamedWriteableRegistry.Entry(PersistentTaskState.class, ENTERPRISE_GEOIP_DOWNLOADER, EnterpriseGeoIpTaskState::new),
+            new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ENTERPRISE_GEOIP_DOWNLOADER, EnterpriseGeoIpTaskParams::new),
             new NamedWriteableRegistry.Entry(Task.Status.class, GEOIP_DOWNLOADER, GeoIpDownloaderStats::new)
         );
     }
@@ -235,4 +291,9 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd
             throw new UncheckedIOException("Failed to build mappings for " + DATABASES_INDEX, e);
         }
     }
+
+    @Override
+    public void reload(Settings settings) {
+        enterpriseGeoIpDownloaderTaskExecutor.reload(settings);
+    }
 }
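
Implementing ReloadablePlugin is what makes the secure-settings caching work end to end: when the keystore is reloaded, the node calls reload(Settings) with the secure settings open, and the plugin forwards the call to the task executor. A minimal sketch of the contract (hypothetical plugin class):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;

public class ReloadSketchPlugin extends Plugin implements ReloadablePlugin {
    @Override
    public void reload(Settings settings) {
        // the SecureSettings inside 'settings' are open only for the duration of
        // this call; copy out anything needed later (as extractSecureSettings does)
    }
}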

+ 209 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java

@@ -0,0 +1,209 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Objects;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+/**
+ * A database configuration is an identified (has an id) configuration of a named geoip location database to download,
+ * and the identifying information/configuration to download the named database from some database provider.
+ * <p>
+ * That is, it has an id e.g. "my_db_config_1" and it says "download the file named XXXX from SomeCompany, and here's the
+ * magic token to use to do that."
+ */
+public record DatabaseConfiguration(String id, String name, Maxmind maxmind) implements Writeable, ToXContentObject {
+
+    // id is a user selected signifier like 'my_domain_db'
+    // name is the name of a file that can be downloaded (like 'GeoIP2-Domain')
+
+    // a configuration will have a 'type' like "maxmind", and that might carry some more details;
+    // for now, though, the important thing is that the JSON has to include it even though we don't model it meaningfully in this class
+
+    public DatabaseConfiguration {
+        // these are invariants, not actual validation
+        Objects.requireNonNull(id);
+        Objects.requireNonNull(name);
+        Objects.requireNonNull(maxmind);
+    }
+
+    /**
+     * An alphanumeric, followed by 0-126 alphanumerics, dashes, or underscores. That is, 1-127 alphanumerics, dashes, or underscores,
+     * but a leading dash or underscore isn't allowed (we're reserving leading dashes and underscores [and other odd characters] for
+     * Elastic and the future).
+     */
+    private static final Pattern ID_PATTERN = Pattern.compile("\\p{Alnum}[_\\-\\p{Alnum}]{0,126}");
+
+    public static final Set<String> MAXMIND_NAMES = Set.of(
+        "GeoIP2-Anonymous-IP",
+        "GeoIP2-City",
+        "GeoIP2-Connection-Type",
+        "GeoIP2-Country",
+        "GeoIP2-Domain",
+        "GeoIP2-Enterprise",
+        "GeoIP2-ISP"
+
+        // in order to prevent a conflict between the (ordinary) geoip downloader and the enterprise geoip downloader,
+        // the enterprise geoip downloader is limited only to downloading the commercial files that the (ordinary) geoip downloader
+        // doesn't support out of the box -- in the future if we would like to relax this constraint, then we'll need to resolve that
+        // conflict at the same time.
+
+        // "GeoLite2-ASN",
+        // "GeoLite2-City",
+        // "GeoLite2-Country"
+    );
+
+    private static final ParseField NAME = new ParseField("name");
+    private static final ParseField MAXMIND = new ParseField("maxmind");
+
+    private static final ConstructingObjectParser<DatabaseConfiguration, String> PARSER = new ConstructingObjectParser<>(
+        "database",
+        false,
+        (a, id) -> {
+            String name = (String) a[0];
+            Maxmind maxmind = (Maxmind) a[1];
+            return new DatabaseConfiguration(id, name, maxmind);
+        }
+    );
+
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME);
+        PARSER.declareObject(ConstructingObjectParser.constructorArg(), (parser, id) -> Maxmind.PARSER.apply(parser, null), MAXMIND);
+    }
+
+    public DatabaseConfiguration(StreamInput in) throws IOException {
+        this(in.readString(), in.readString(), new Maxmind(in));
+    }
+
+    public static DatabaseConfiguration parse(XContentParser parser, String id) {
+        return PARSER.apply(parser, id);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(id);
+        out.writeString(name);
+        maxmind.writeTo(out);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field("name", name);
+        builder.field("maxmind", maxmind);
+        builder.endObject();
+        return builder;
+    }
+
+    /**
+     * An id is intended to consist of alphanumerics, dashes, and underscores (only), but we're reserving leading dashes and
+     * underscores for ourselves in the future; that is, users can't PUT ids that begin with them.
+     */
+    static void validateId(String id) throws IllegalArgumentException {
+        if (Strings.isNullOrEmpty(id)) {
+            throw new IllegalArgumentException("invalid database configuration id [" + id + "]: must not be null or empty");
+        }
+        MetadataCreateIndexService.validateIndexOrAliasName(
+            id,
+            (id1, description) -> new IllegalArgumentException("invalid database configuration id [" + id1 + "]: " + description)
+        );
+        int byteCount = id.getBytes(StandardCharsets.UTF_8).length;
+        if (byteCount > 127) {
+            throw new IllegalArgumentException(
+                "invalid database configuration id [" + id + "]: id is too long, (" + byteCount + " > " + 127 + ")"
+            );
+        }
+        if (ID_PATTERN.matcher(id).matches() == false) {
+            throw new IllegalArgumentException(
+                "invalid database configuration id ["
+                    + id
+                    + "]: id doesn't match required rules (alphanumerics, dashes, and underscores, only)"
+            );
+        }
+    }
+
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException err = new ActionRequestValidationException();
+
+        // id validation (validateId) throws rather than collecting errors, so we bridge it into the validation exception below
+
+        // name validation
+        if (Strings.hasText(name) == false) {
+            err.addValidationError("invalid name [" + name + "]: cannot be empty");
+        }
+
+        if (MAXMIND_NAMES.contains(name) == false) {
+            err.addValidationError("invalid name [" + name + "]: must be a supported name ([" + MAXMIND_NAMES + "])");
+        }
+
+        // important: the name must be unique across all configurations of this same type,
+        // but we validate that in the cluster state update, not here.
+        try {
+            validateId(id);
+        } catch (IllegalArgumentException e) {
+            err.addValidationError(e.getMessage());
+        }
+        return err.validationErrors().isEmpty() ? null : err;
+    }
+
+    public record Maxmind(String accountId) implements Writeable, ToXContentObject {
+
+        public Maxmind {
+            // this is an invariant, not actual validation
+            Objects.requireNonNull(accountId);
+        }
+
+        private static final ParseField ACCOUNT_ID = new ParseField("account_id");
+
+        private static final ConstructingObjectParser<Maxmind, Void> PARSER = new ConstructingObjectParser<>("database", false, (a, id) -> {
+            String accountId = (String) a[0];
+            return new Maxmind(accountId);
+        });
+
+        static {
+            PARSER.declareString(ConstructingObjectParser.constructorArg(), ACCOUNT_ID);
+        }
+
+        public Maxmind(StreamInput in) throws IOException {
+            this(in.readString());
+        }
+
+        public static Maxmind parse(XContentParser parser) {
+            return PARSER.apply(parser, null);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(accountId);
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.field("account_id", accountId);
+            builder.endObject();
+            return builder;
+        }
+    }
+}
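
A sketch tying the invariants and validation together (illustrative values): a leading underscore in the id and a GeoLite2 name both fail validation, the latter because the free databases are deliberately left to the ordinary downloader.

DatabaseConfiguration bad = new DatabaseConfiguration(
    "_my_db_config", "GeoLite2-City", new DatabaseConfiguration.Maxmind("my_account_id"));
ActionRequestValidationException err = bad.validate(); // non-null: two validation errors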

+ 84 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadata.java

@@ -0,0 +1,84 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.SimpleDiffable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * {@code DatabaseConfigurationMetadata} encapsulates a {@link DatabaseConfiguration} as well as
+ * the additional meta information like version (a monotonically incrementing number), and last modified date.
+ */
+public record DatabaseConfigurationMetadata(DatabaseConfiguration database, long version, long modifiedDate)
+    implements
+        SimpleDiffable<DatabaseConfigurationMetadata>,
+        ToXContentObject {
+
+    public static final ParseField DATABASE = new ParseField("database");
+    public static final ParseField VERSION = new ParseField("version");
+    public static final ParseField MODIFIED_DATE_MILLIS = new ParseField("modified_date_millis");
+    public static final ParseField MODIFIED_DATE = new ParseField("modified_date");
+    // later, things like this:
+    // static final ParseField LAST_SUCCESS = new ParseField("last_success");
+    // static final ParseField LAST_FAILURE = new ParseField("last_failure");
+
+    public static final ConstructingObjectParser<DatabaseConfigurationMetadata, String> PARSER = new ConstructingObjectParser<>(
+        "database_metadata",
+        true,
+        a -> {
+            DatabaseConfiguration database = (DatabaseConfiguration) a[0];
+            return new DatabaseConfigurationMetadata(database, (long) a[1], (long) a[2]);
+        }
+    );
+    static {
+        PARSER.declareObject(ConstructingObjectParser.constructorArg(), DatabaseConfiguration::parse, DATABASE);
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION);
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_MILLIS);
+    }
+
+    public static DatabaseConfigurationMetadata parse(XContentParser parser, String name) {
+        return PARSER.apply(parser, name);
+    }
+
+    public DatabaseConfigurationMetadata(StreamInput in) throws IOException {
+        this(new DatabaseConfiguration(in), in.readVLong(), in.readVLong());
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        // this is cluster state serialization; the id is implicit and doesn't need to be included here
+        // (we'll be in a JSON map where the id is the key)
+        builder.startObject();
+        builder.field(VERSION.getPreferredName(), version);
+        builder.timeField(MODIFIED_DATE_MILLIS.getPreferredName(), MODIFIED_DATE.getPreferredName(), modifiedDate);
+        builder.field(DATABASE.getPreferredName(), database);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        database.writeTo(out);
+        out.writeVLong(version);
+        out.writeVLong(modifiedDate);
+    }
+
+    public static Diff<DatabaseConfigurationMetadata> readDiffFrom(StreamInput in) throws IOException {
+        return SimpleDiffable.readDiffFrom(DatabaseConfigurationMetadata::new, in);
+    }
+}
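
A sketch of how an update to an existing configuration is expected to be recorded (the helper method is hypothetical): the version increases monotonically and the modified date is refreshed.

static DatabaseConfigurationMetadata updated(DatabaseConfigurationMetadata previous, DatabaseConfiguration replacement) {
    return new DatabaseConfigurationMetadata(replacement, previous.version() + 1, System.currentTimeMillis());
}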

+ 70 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DeleteDatabaseConfigurationAction.java

@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.TimeValue;
+
+import java.io.IOException;
+import java.util.Objects;
+
+public class DeleteDatabaseConfigurationAction extends ActionType<AcknowledgedResponse> {
+    public static final DeleteDatabaseConfigurationAction INSTANCE = new DeleteDatabaseConfigurationAction();
+    public static final String NAME = "cluster:admin/ingest/geoip/database/delete";
+
+    protected DeleteDatabaseConfigurationAction() {
+        super(NAME);
+    }
+
+    public static class Request extends AcknowledgedRequest<Request> {
+
+        private final String databaseId;
+
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            databaseId = in.readString();
+        }
+
+        public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String databaseId) {
+            super(masterNodeTimeout, ackTimeout);
+            this.databaseId = Objects.requireNonNull(databaseId, "id may not be null");
+        }
+
+        public String getDatabaseId() {
+            return this.databaseId;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(databaseId);
+        }
+
+        @Override
+        public int hashCode() {
+            return databaseId.hashCode();
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null) {
+                return false;
+            }
+            if (obj.getClass() != getClass()) {
+                return false;
+            }
+            Request other = (Request) obj;
+            return Objects.equals(databaseId, other.databaseId);
+        }
+    }
+}
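
A sketch of issuing the delete from Java (assuming a client and an ActionListener<AcknowledgedResponse> named 'listener' are in scope):

DeleteDatabaseConfigurationAction.Request request = new DeleteDatabaseConfigurationAction.Request(
    TimeValue.timeValueSeconds(30), // master node timeout
    TimeValue.timeValueSeconds(30), // ack timeout
    "my_db_config_1"
);
client.execute(DeleteDatabaseConfigurationAction.INSTANCE, request, listener);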

+ 142 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java

@@ -0,0 +1,142 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.DATABASE;
+import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.MODIFIED_DATE;
+import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.MODIFIED_DATE_MILLIS;
+import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.VERSION;
+
+public class GetDatabaseConfigurationAction extends ActionType<GetDatabaseConfigurationAction.Response> {
+    public static final GetDatabaseConfigurationAction INSTANCE = new GetDatabaseConfigurationAction();
+    public static final String NAME = "cluster:admin/ingest/geoip/database/get";
+
+    protected GetDatabaseConfigurationAction() {
+        super(NAME);
+    }
+
+    public static class Request extends AcknowledgedRequest<GetDatabaseConfigurationAction.Request> {
+
+        private final String[] databaseIds;
+
+        public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String... databaseIds) {
+            super(masterNodeTimeout, ackTimeout);
+            this.databaseIds = Objects.requireNonNull(databaseIds, "ids may not be null");
+        }
+
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            databaseIds = in.readStringArray();
+        }
+
+        public String[] getDatabaseIds() {
+            return this.databaseIds;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeStringArray(databaseIds);
+        }
+
+        @Override
+        public int hashCode() {
+            return Arrays.hashCode(databaseIds);
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null) {
+                return false;
+            }
+            if (obj.getClass() != getClass()) {
+                return false;
+            }
+            Request other = (Request) obj;
+            return Arrays.equals(databaseIds, other.databaseIds);
+        }
+    }
+
+    public static class Response extends ActionResponse implements ToXContentObject {
+
+        private final List<DatabaseConfigurationMetadata> databases;
+
+        public Response(List<DatabaseConfigurationMetadata> databases) {
+            this.databases = List.copyOf(databases); // defensive copy
+        }
+
+        public Response(StreamInput in) throws IOException {
+            this(in.readCollectionAsList(DatabaseConfigurationMetadata::new));
+        }
+
+        public List<DatabaseConfigurationMetadata> getDatabases() {
+            return this.databases;
+        }
+
+        @Override
+        public String toString() {
+            return Strings.toString(this);
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.startArray("databases");
+            for (DatabaseConfigurationMetadata item : databases) {
+                DatabaseConfiguration database = item.database();
+                builder.startObject();
+                builder.field("id", database.id()); // serialize including the id -- this is get response serialization
+                builder.field(VERSION.getPreferredName(), item.version());
+                builder.timeField(MODIFIED_DATE_MILLIS.getPreferredName(), MODIFIED_DATE.getPreferredName(), item.modifiedDate());
+                builder.field(DATABASE.getPreferredName(), database);
+                builder.endObject();
+            }
+            builder.endArray();
+            builder.endObject();
+            return builder;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeCollection(databases);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(databases);
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null) {
+                return false;
+            }
+            if (obj.getClass() != getClass()) {
+                return false;
+            }
+            Response other = (Response) obj;
+            return databases.equals(other.databases);
+        }
+    }
+}
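
A hedged sketch of the response body this serialization produces (with human-readable output off, so timeField emits the millis variant): the field names assume that VERSION, MODIFIED_DATE_MILLIS, and DATABASE resolve to "version", "modified_date_millis", and "database" respectively, and the id and timestamp values are hypothetical. The nested database object is elided because DatabaseConfiguration's own serialization lives in DatabaseConfiguration.java.

    {
      "databases": [
        {
          "id": "my-database-id",
          "version": 1,
          "modified_date_millis": 1720000000000,
          "database": { ... }
        }
      ]
    }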

+ 87 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java

@@ -0,0 +1,87 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+public class PutDatabaseConfigurationAction extends ActionType<AcknowledgedResponse> {
+    public static final PutDatabaseConfigurationAction INSTANCE = new PutDatabaseConfigurationAction();
+    public static final String NAME = "cluster:admin/ingest/geoip/database/put";
+
+    protected PutDatabaseConfigurationAction() {
+        super(NAME);
+    }
+
+    public static class Request extends AcknowledgedRequest<Request> {
+
+        private final DatabaseConfiguration database;
+
+        public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, DatabaseConfiguration database) {
+            super(masterNodeTimeout, ackTimeout);
+            this.database = database;
+        }
+
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            database = new DatabaseConfiguration(in);
+        }
+
+        public DatabaseConfiguration getDatabase() {
+            return this.database;
+        }
+
+        public static Request parseRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String id, XContentParser parser) {
+            return new Request(masterNodeTimeout, ackTimeout, DatabaseConfiguration.parse(parser, id));
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            database.writeTo(out);
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return database.validate();
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(database);
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null) {
+                return false;
+            }
+            if (obj.getClass() != getClass()) {
+                return false;
+            }
+            Request other = (Request) obj;
+            return database.equals(other.database);
+        }
+
+        @Override
+        public String toString() {
+            return Strings.toString((b, p) -> b.field(database.id(), database));
+        }
+    }
+}
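
A minimal client-side sketch, assuming a NodeClient in scope; the id, name, account value, and timeouts are hypothetical, while the DatabaseConfiguration constructor shape follows its use in the tests later in this change:

    // hypothetical values throughout; a sketch of building and executing the put request
    DatabaseConfiguration database = new DatabaseConfiguration(
        "my-database-id",                                 // configuration id, i.e. the REST path {id}
        "GeoIP2-City",                                    // name of the database to download
        new DatabaseConfiguration.Maxmind("my_account")   // provider-specific settings
    );
    PutDatabaseConfigurationAction.Request request = new PutDatabaseConfigurationAction.Request(
        TimeValue.timeValueSeconds(30),                   // master node timeout
        TimeValue.timeValueSeconds(30),                   // ack timeout
        database
    );
    client.execute(PutDatabaseConfigurationAction.INSTANCE, request, ActionListener.noop());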

+ 46 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestDeleteDatabaseConfigurationAction.java

@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.Scope;
+import org.elasticsearch.rest.ServerlessScope;
+import org.elasticsearch.rest.action.RestToXContentListener;
+
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+import static org.elasticsearch.rest.RestUtils.getAckTimeout;
+import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout;
+
+@ServerlessScope(Scope.INTERNAL)
+public class RestDeleteDatabaseConfigurationAction extends BaseRestHandler {
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(DELETE, "/_ingest/geoip/database/{id}"));
+    }
+
+    @Override
+    public String getName() {
+        return "geoip_delete_database_configuration";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
+        final var req = new DeleteDatabaseConfigurationAction.Request(
+            getMasterNodeTimeout(request),
+            getAckTimeout(request),
+            request.param("id")
+        );
+        return channel -> client.execute(DeleteDatabaseConfigurationAction.INSTANCE, req, new RestToXContentListener<>(channel));
+    }
+}
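
The handler maps this route one-to-one onto DeleteDatabaseConfigurationAction; an invocation looks like the following, where the id is hypothetical and the usual master_timeout and timeout query parameters are read by the RestUtils helpers:

    DELETE /_ingest/geoip/database/my-database-id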

+ 47 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestGetDatabaseConfigurationAction.java

@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.Scope;
+import org.elasticsearch.rest.ServerlessScope;
+import org.elasticsearch.rest.action.RestToXContentListener;
+
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+import static org.elasticsearch.rest.RestUtils.getAckTimeout;
+import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout;
+
+@ServerlessScope(Scope.INTERNAL)
+public class RestGetDatabaseConfigurationAction extends BaseRestHandler {
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(GET, "/_ingest/geoip/database"), new Route(GET, "/_ingest/geoip/database/{id}"));
+    }
+
+    @Override
+    public String getName() {
+        return "geoip_get_database_configuration";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) {
+        final var req = new GetDatabaseConfigurationAction.Request(
+            getMasterNodeTimeout(request),
+            getAckTimeout(request),
+            Strings.splitStringByCommaToArray(request.param("id"))
+        );
+        return channel -> client.execute(GetDatabaseConfigurationAction.INSTANCE, req, new RestToXContentListener<>(channel));
+    }
+}
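
Because the id parameter is split on commas, the two routes cover fetching every configuration, an explicit list, or a single wildcard pattern (the ids are hypothetical):

    GET /_ingest/geoip/database
    GET /_ingest/geoip/database/my-db-one,my-db-two
    GET /_ingest/geoip/database/my-db-*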

+ 52 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestPutDatabaseConfigurationAction.java

@@ -0,0 +1,52 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.Scope;
+import org.elasticsearch.rest.ServerlessScope;
+import org.elasticsearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.PUT;
+import static org.elasticsearch.rest.RestUtils.getAckTimeout;
+import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout;
+
+@ServerlessScope(Scope.INTERNAL)
+public class RestPutDatabaseConfigurationAction extends BaseRestHandler {
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(PUT, "/_ingest/geoip/database/{id}"));
+    }
+
+    @Override
+    public String getName() {
+        return "geoip_put_database_configuration";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        final Request req;
+        try (var parser = request.contentParser()) {
+            req = PutDatabaseConfigurationAction.Request.parseRequest(
+                getMasterNodeTimeout(request),
+                getAckTimeout(request),
+                request.param("id"),
+                parser
+            );
+        }
+        return channel -> client.execute(PutDatabaseConfigurationAction.INSTANCE, req, new RestToXContentListener<>(channel));
+    }
+}
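
A hedged request sketch: the body field names are assumptions based on DatabaseConfiguration's parser, which is not part of this excerpt, and the id, name, and account value are hypothetical:

    PUT /_ingest/geoip/database/my-database-id
    {
      "name": "GeoIP2-City",
      "maxmind": {
        "account_id": "1234567"
      }
    }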

+ 128 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java

@@ -0,0 +1,128 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateTaskListener;
+import org.elasticsearch.cluster.SimpleBatchedExecutor;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.Strings;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
+import org.elasticsearch.ingest.geoip.direct.DeleteDatabaseConfigurationAction.Request;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class TransportDeleteDatabaseConfigurationAction extends TransportMasterNodeAction<Request, AcknowledgedResponse> {
+
+    private static final Logger logger = LogManager.getLogger(TransportDeleteDatabaseConfigurationAction.class);
+
+    private static final SimpleBatchedExecutor<DeleteDatabaseConfigurationTask, Void> DELETE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() {
+        @Override
+        public Tuple<ClusterState, Void> executeTask(DeleteDatabaseConfigurationTask task, ClusterState clusterState) throws Exception {
+            return Tuple.tuple(task.execute(clusterState), null);
+        }
+
+        @Override
+        public void taskSucceeded(DeleteDatabaseConfigurationTask task, Void unused) {
+            logger.trace("Updated cluster state for deletion of database configuration [{}]", task.databaseId);
+            task.listener.onResponse(AcknowledgedResponse.TRUE);
+        }
+    };
+
+    private final MasterServiceTaskQueue<DeleteDatabaseConfigurationTask> deleteDatabaseConfigurationTaskQueue;
+
+    @Inject
+    public TransportDeleteDatabaseConfigurationAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            DeleteDatabaseConfigurationAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            Request::new,
+            indexNameExpressionResolver,
+            AcknowledgedResponse::readFrom,
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+        this.deleteDatabaseConfigurationTaskQueue = clusterService.createTaskQueue(
+            "delete-geoip-database-configuration-state-update",
+            Priority.NORMAL,
+            DELETE_TASK_EXECUTOR
+        );
+    }
+
+    @Override
+    protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener)
+        throws Exception {
+        final String id = request.getDatabaseId();
+        final IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
+        if (geoIpMeta.getDatabases().containsKey(id) == false) {
+            throw new ResourceNotFoundException("database configuration not found: {}", id);
+        }
+        deleteDatabaseConfigurationTaskQueue.submitTask(
+            Strings.format("delete-geoip-database-configuration-[%s]", id),
+            new DeleteDatabaseConfigurationTask(listener, id),
+            null
+        );
+    }
+
+    private record DeleteDatabaseConfigurationTask(ActionListener<AcknowledgedResponse> listener, String databaseId)
+        implements
+            ClusterStateTaskListener {
+
+        ClusterState execute(ClusterState currentState) throws Exception {
+            final IngestGeoIpMetadata geoIpMeta = currentState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
+
+            logger.debug("deleting database configuration [{}]", databaseId);
+            Map<String, DatabaseConfigurationMetadata> databases = new HashMap<>(geoIpMeta.getDatabases());
+            databases.remove(databaseId);
+
+            Metadata currentMeta = currentState.metadata();
+            return ClusterState.builder(currentState)
+                .metadata(Metadata.builder(currentMeta).putCustom(IngestGeoIpMetadata.TYPE, new IngestGeoIpMetadata(databases)))
+                .build();
+        }
+
+        @Override
+        public void onFailure(Exception e) {
+            listener.onFailure(e);
+        }
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
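
Note that a delete for an id absent from IngestGeoIpMetadata fails fast in masterOperation, before any cluster state task is queued (the id is hypothetical):

    DELETE /_ingest/geoip/database/does-not-exist    fails with ResourceNotFoundException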

+ 109 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java

@@ -0,0 +1,109 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class TransportGetDatabaseConfigurationAction extends TransportMasterNodeAction<
+    GetDatabaseConfigurationAction.Request,
+    GetDatabaseConfigurationAction.Response> {
+
+    @Inject
+    public TransportGetDatabaseConfigurationAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            GetDatabaseConfigurationAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            GetDatabaseConfigurationAction.Request::new,
+            indexNameExpressionResolver,
+            GetDatabaseConfigurationAction.Response::new,
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+    }
+
+    @Override
+    protected void masterOperation(
+        final Task task,
+        final GetDatabaseConfigurationAction.Request request,
+        final ClusterState state,
+        final ActionListener<GetDatabaseConfigurationAction.Response> listener
+    ) {
+        final Set<String> ids;
+        if (request.getDatabaseIds().length == 0) {
+            // if we did not ask for a specific name, then return all databases
+            ids = Set.of("*");
+        } else {
+            ids = new LinkedHashSet<>(Arrays.asList(request.getDatabaseIds()));
+        }
+
+        if (ids.size() > 1 && ids.stream().anyMatch(Regex::isSimpleMatchPattern)) {
+            throw new IllegalArgumentException(
+                "wildcard only supports a single value, please use comma-separated values or a single wildcard value"
+            );
+        }
+
+        final IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
+        List<DatabaseConfigurationMetadata> results = new ArrayList<>();
+
+        for (String id : ids) {
+            if (Regex.isSimpleMatchPattern(id)) {
+                for (Map.Entry<String, DatabaseConfigurationMetadata> entry : geoIpMeta.getDatabases().entrySet()) {
+                    if (Regex.simpleMatch(id, entry.getKey())) {
+                        results.add(entry.getValue());
+                    }
+                }
+            } else {
+                DatabaseConfigurationMetadata meta = geoIpMeta.getDatabases().get(id);
+                if (meta == null) {
+                    listener.onFailure(new ResourceNotFoundException("database configuration not found: {}", id));
+                    return;
+                } else {
+                    results.add(meta);
+                }
+            }
+        }
+
+        listener.onResponse(new GetDatabaseConfigurationAction.Response(results));
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(GetDatabaseConfigurationAction.Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+    }
+}
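
The wildcard handling above has three distinct outcomes, illustrated with hypothetical ids:

    GET /_ingest/geoip/database/my-db-*             a lone wildcard is allowed; zero matches returns an empty list
    GET /_ingest/geoip/database/my-db-*,other-db    rejected: a wildcard only supports a single value
    GET /_ingest/geoip/database/does-not-exist      a concrete, missing id fails with ResourceNotFoundException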

+ 178 - 0
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java

@@ -0,0 +1,178 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateTaskListener;
+import org.elasticsearch.cluster.SimpleBatchedExecutor;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.Strings;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
+import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+public class TransportPutDatabaseConfigurationAction extends TransportMasterNodeAction<Request, AcknowledgedResponse> {
+
+    private static final Logger logger = LogManager.getLogger(TransportPutDatabaseConfigurationAction.class);
+
+    private static final SimpleBatchedExecutor<UpdateDatabaseConfigurationTask, Void> UPDATE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() {
+        @Override
+        public Tuple<ClusterState, Void> executeTask(UpdateDatabaseConfigurationTask task, ClusterState clusterState) throws Exception {
+            return Tuple.tuple(task.execute(clusterState), null);
+        }
+
+        @Override
+        public void taskSucceeded(UpdateDatabaseConfigurationTask task, Void unused) {
+            logger.trace("Updated cluster state for creation-or-update of database configuration [{}]", task.database.id());
+            task.listener.onResponse(AcknowledgedResponse.TRUE);
+        }
+    };
+
+    private final MasterServiceTaskQueue<UpdateDatabaseConfigurationTask> updateDatabaseConfigurationTaskQueue;
+
+    @Inject
+    public TransportPutDatabaseConfigurationAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            PutDatabaseConfigurationAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            Request::new,
+            indexNameExpressionResolver,
+            AcknowledgedResponse::readFrom,
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+        this.updateDatabaseConfigurationTaskQueue = clusterService.createTaskQueue(
+            "update-geoip-database-configuration-state-update",
+            Priority.NORMAL,
+            UPDATE_TASK_EXECUTOR
+        );
+    }
+
+    @Override
+    protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener) {
+        final String id = request.getDatabase().id();
+        updateDatabaseConfigurationTaskQueue.submitTask(
+            Strings.format("update-geoip-database-configuration-[%s]", id),
+            new UpdateDatabaseConfigurationTask(listener, request.getDatabase()),
+            null
+        );
+    }
+
+    /**
+     * Returns 'true' if the database configuration is effectively the same, and thus the update can be a no-op.
+     */
+    static boolean isNoopUpdate(@Nullable DatabaseConfigurationMetadata existingDatabase, DatabaseConfiguration newDatabase) {
+        if (existingDatabase == null) {
+            return false;
+        } else {
+            return newDatabase.equals(existingDatabase.database());
+        }
+    }
+
+    static void validatePrerequisites(DatabaseConfiguration database, ClusterState state) {
+        // we need to verify that the database represents a unique file (name) among the various databases for this same provider
+        IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
+
+        Optional<DatabaseConfiguration> sameName = geoIpMeta.getDatabases()
+            .values()
+            .stream()
+            .map(DatabaseConfigurationMetadata::database)
+            // .filter(d -> d.type().equals(database.type())) // of the same type (right now the type is always just 'maxmind')
+            .filter(d -> d.id().equals(database.id()) == false) // and a different id
+            .filter(d -> d.name().equals(database.name())) // but has the same name!
+            .findFirst();
+
+        sameName.ifPresent(d -> {
+            throw new IllegalArgumentException(
+                Strings.format("database [%s] is already being downloaded via configuration [%s]", database.name(), d.id())
+            );
+        });
+    }
+
+    private record UpdateDatabaseConfigurationTask(ActionListener<AcknowledgedResponse> listener, DatabaseConfiguration database)
+        implements
+            ClusterStateTaskListener {
+
+        ClusterState execute(ClusterState currentState) throws Exception {
+            IngestGeoIpMetadata geoIpMeta = currentState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
+
+            String id = database.id();
+            final DatabaseConfigurationMetadata existingDatabase = geoIpMeta.getDatabases().get(id);
+            // double-check for no-op in the state update task, in case it was changed/reset in the meantime
+            if (isNoopUpdate(existingDatabase, database)) {
+                return currentState;
+            }
+
+            validatePrerequisites(database, currentState);
+
+            Map<String, DatabaseConfigurationMetadata> databases = new HashMap<>(geoIpMeta.getDatabases());
+            databases.put(
+                id,
+                new DatabaseConfigurationMetadata(
+                    database,
+                    existingDatabase == null ? 1 : existingDatabase.version() + 1,
+                    Instant.now().toEpochMilli()
+                )
+            );
+            geoIpMeta = new IngestGeoIpMetadata(databases);
+
+            if (existingDatabase == null) {
+                logger.debug("adding new database configuration [{}]", id);
+            } else {
+                logger.debug("updating existing database configuration [{}]", id);
+            }
+
+            Metadata currentMeta = currentState.metadata();
+            return ClusterState.builder(currentState)
+                .metadata(Metadata.builder(currentMeta).putCustom(IngestGeoIpMetadata.TYPE, geoIpMeta))
+                .build();
+        }
+
+        @Override
+        public void onFailure(Exception e) {
+            listener.onFailure(e);
+        }
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
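
To make the versioning and no-op semantics concrete, a small sketch; the database value is hypothetical, and the DatabaseConfigurationMetadata constructor shape follows its use in execute() above:

    // a brand-new configuration is never a no-op; its metadata is created at version 1
    assert TransportPutDatabaseConfigurationAction.isNoopUpdate(null, database) == false;

    // re-putting an identical configuration is a no-op: execute() returns currentState
    // unchanged, so neither the version nor the modified date advances
    DatabaseConfigurationMetadata existing =
        new DatabaseConfigurationMetadata(database, 1, Instant.now().toEpochMilli());
    assert TransportPutDatabaseConfigurationAction.isNoopUpdate(existing, database);

    // any effective change is written back as existingDatabase.version() + 1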

+ 538 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java

@@ -0,0 +1,538 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.DocWriteRequest.OpType;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushAction;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.index.TransportIndexAction;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.hash.MessageDigests;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.ingest.EnterpriseGeoIpTask;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksService;
+import org.elasticsearch.telemetry.metric.MeterRegistry;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xcontent.XContentType;
+import org.hamcrest.Matchers;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.PasswordAuthentication;
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiConsumer;
+
+import static org.elasticsearch.ingest.geoip.DatabaseNodeServiceTests.createClusterState;
+import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloader.MAX_CHUNK_SIZE;
+import static org.elasticsearch.tasks.TaskId.EMPTY_TASK_ID;
+import static org.hamcrest.Matchers.equalTo;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verifyNoInteractions;
+import static org.mockito.Mockito.when;
+
+public class EnterpriseGeoIpDownloaderTests extends ESTestCase {
+
+    private HttpClient httpClient;
+    private ClusterService clusterService;
+    private ThreadPool threadPool;
+    private MockClient client;
+    private EnterpriseGeoIpDownloader geoIpDownloader;
+
+    @Before
+    public void setup() throws IOException {
+        httpClient = mock(HttpClient.class);
+        when(httpClient.getBytes(any(), anyString())).thenReturn(
+            "e4a3411cdd7b21eaf18675da5a7f9f360d33c6882363b2c19c38715834c9e836  GeoIP2-City_20240709.tar.gz".getBytes(StandardCharsets.UTF_8)
+        );
+        clusterService = mock(ClusterService.class);
+        threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP);
+        when(clusterService.getClusterSettings()).thenReturn(
+            new ClusterSettings(Settings.EMPTY, Set.of(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING))
+        );
+        ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()));
+        when(clusterService.state()).thenReturn(state);
+        client = new MockClient(threadPool);
+        geoIpDownloader = new EnterpriseGeoIpDownloader(
+            client,
+            httpClient,
+            clusterService,
+            threadPool,
+            1,
+            "",
+            "",
+            "",
+            EMPTY_TASK_ID,
+            Map.of(),
+            () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY),
+            (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray())
+        ) {
+            {
+                EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams geoIpTaskParams = mock(EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams.class);
+                when(geoIpTaskParams.getWriteableName()).thenReturn(EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER);
+                init(new PersistentTasksService(clusterService, threadPool, client), null, null, 0);
+            }
+        };
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+        threadPool.shutdownNow();
+    }
+
+    public void testGetChunkEndOfStream() throws IOException {
+        byte[] chunk = EnterpriseGeoIpDownloader.getChunk(new InputStream() {
+            @Override
+            public int read() {
+                return -1;
+            }
+        });
+        assertArrayEquals(new byte[0], chunk);
+        chunk = EnterpriseGeoIpDownloader.getChunk(new ByteArrayInputStream(new byte[0]));
+        assertArrayEquals(new byte[0], chunk);
+    }
+
+    public void testGetChunkLessThanChunkSize() throws IOException {
+        ByteArrayInputStream is = new ByteArrayInputStream(new byte[] { 1, 2, 3, 4 });
+        byte[] chunk = EnterpriseGeoIpDownloader.getChunk(is);
+        assertArrayEquals(new byte[] { 1, 2, 3, 4 }, chunk);
+        chunk = EnterpriseGeoIpDownloader.getChunk(is);
+        assertArrayEquals(new byte[0], chunk);
+    }
+
+    public void testGetChunkExactlyChunkSize() throws IOException {
+        byte[] bigArray = new byte[MAX_CHUNK_SIZE];
+        for (int i = 0; i < MAX_CHUNK_SIZE; i++) {
+            bigArray[i] = (byte) i;
+        }
+        ByteArrayInputStream is = new ByteArrayInputStream(bigArray);
+        byte[] chunk = EnterpriseGeoIpDownloader.getChunk(is);
+        assertArrayEquals(bigArray, chunk);
+        chunk = EnterpriseGeoIpDownloader.getChunk(is);
+        assertArrayEquals(new byte[0], chunk);
+    }
+
+    public void testGetChunkMoreThanChunkSize() throws IOException {
+        byte[] bigArray = new byte[MAX_CHUNK_SIZE * 2];
+        for (int i = 0; i < MAX_CHUNK_SIZE * 2; i++) {
+            bigArray[i] = (byte) i;
+        }
+        byte[] smallArray = new byte[MAX_CHUNK_SIZE];
+        System.arraycopy(bigArray, 0, smallArray, 0, MAX_CHUNK_SIZE);
+        ByteArrayInputStream is = new ByteArrayInputStream(bigArray);
+        byte[] chunk = EnterpriseGeoIpDownloader.getChunk(is);
+        assertArrayEquals(smallArray, chunk);
+        System.arraycopy(bigArray, MAX_CHUNK_SIZE, smallArray, 0, MAX_CHUNK_SIZE);
+        chunk = EnterpriseGeoIpDownloader.getChunk(is);
+        assertArrayEquals(smallArray, chunk);
+        chunk = EnterpriseGeoIpDownloader.getChunk(is);
+        assertArrayEquals(new byte[0], chunk);
+    }
+
+    public void testGetChunkRethrowsIOException() {
+        expectThrows(IOException.class, () -> EnterpriseGeoIpDownloader.getChunk(new InputStream() {
+            @Override
+            public int read() throws IOException {
+                throw new IOException();
+            }
+        }));
+    }
+
+    public void testIndexChunksNoData() throws IOException {
+        client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener<BroadcastResponse> flushResponseActionListener) -> {
+            assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices());
+            flushResponseActionListener.onResponse(mock(BroadcastResponse.class));
+        });
+        client.addHandler(
+            RefreshAction.INSTANCE,
+            (RefreshRequest request, ActionListener<BroadcastResponse> flushResponseActionListener) -> {
+                assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices());
+                flushResponseActionListener.onResponse(mock(BroadcastResponse.class));
+            }
+        );
+
+        InputStream empty = new ByteArrayInputStream(new byte[0]);
+        assertEquals(
+            Tuple.tuple(0, "d41d8cd98f00b204e9800998ecf8427e"),
+            geoIpDownloader.indexChunks(
+                "test",
+                empty,
+                0,
+                MessageDigests.sha256(),
+                "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+                0
+            )
+        );
+    }
+
+    public void testIndexChunksMd5Mismatch() {
+        client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener<BroadcastResponse> flushResponseActionListener) -> {
+            assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices());
+            flushResponseActionListener.onResponse(mock(BroadcastResponse.class));
+        });
+        client.addHandler(
+            RefreshAction.INSTANCE,
+            (RefreshRequest request, ActionListener<BroadcastResponse> flushResponseActionListener) -> {
+                assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices());
+                flushResponseActionListener.onResponse(mock(BroadcastResponse.class));
+            }
+        );
+
+        IOException exception = expectThrows(
+            IOException.class,
+            () -> geoIpDownloader.indexChunks("test", new ByteArrayInputStream(new byte[0]), 0, MessageDigests.sha256(), "123123", 0)
+        );
+        assertEquals(
+            "checksum mismatch, expected [123123], actual [e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855]",
+            exception.getMessage()
+        );
+    }
+
+    public void testIndexChunks() throws IOException {
+        byte[] bigArray = new byte[MAX_CHUNK_SIZE + 20];
+        for (int i = 0; i < MAX_CHUNK_SIZE + 20; i++) {
+            bigArray[i] = (byte) i;
+        }
+        byte[][] chunksData = new byte[2][];
+        chunksData[0] = new byte[MAX_CHUNK_SIZE];
+        System.arraycopy(bigArray, 0, chunksData[0], 0, MAX_CHUNK_SIZE);
+        chunksData[1] = new byte[20];
+        System.arraycopy(bigArray, MAX_CHUNK_SIZE, chunksData[1], 0, 20);
+
+        AtomicInteger chunkIndex = new AtomicInteger();
+
+        client.addHandler(TransportIndexAction.TYPE, (IndexRequest request, ActionListener<DocWriteResponse> listener) -> {
+            int chunk = chunkIndex.getAndIncrement();
+            assertEquals(OpType.CREATE, request.opType());
+            assertThat(request.id(), Matchers.startsWith("test_" + (chunk + 15) + "_"));
+            assertEquals(XContentType.SMILE, request.getContentType());
+            Map<String, Object> source = request.sourceAsMap();
+            assertEquals("test", source.get("name"));
+            assertArrayEquals(chunksData[chunk], (byte[]) source.get("data"));
+            assertEquals(chunk + 15, source.get("chunk"));
+            listener.onResponse(mock(IndexResponse.class));
+        });
+        client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener<BroadcastResponse> flushResponseActionListener) -> {
+            assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices());
+            flushResponseActionListener.onResponse(mock(BroadcastResponse.class));
+        });
+        client.addHandler(
+            RefreshAction.INSTANCE,
+            (RefreshRequest request, ActionListener<BroadcastResponse> flushResponseActionListener) -> {
+                assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices());
+                flushResponseActionListener.onResponse(mock(BroadcastResponse.class));
+            }
+        );
+
+        InputStream big = new ByteArrayInputStream(bigArray);
+        assertEquals(
+            Tuple.tuple(17, "a67563dfa8f3cba8b8cff61eb989a749"),
+            geoIpDownloader.indexChunks(
+                "test",
+                big,
+                15,
+                MessageDigests.sha256(),
+                "f2304545f224ff9ffcc585cb0a993723f911e03beb552cc03937dd443e931eab",
+                0
+            )
+        );
+
+        assertEquals(2, chunkIndex.get());
+    }
+
+    public void testProcessDatabaseNew() throws IOException {
+        ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]);
+        when(httpClient.get(any(), any())).thenReturn(bais);
+        AtomicBoolean indexedChunks = new AtomicBoolean(false);
+        geoIpDownloader = new EnterpriseGeoIpDownloader(
+            client,
+            httpClient,
+            clusterService,
+            threadPool,
+            1,
+            "",
+            "",
+            "",
+            EMPTY_TASK_ID,
+            Map.of(),
+            () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY),
+            (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray())
+        ) {
+            @Override
+            protected void updateTimestamp(String name, GeoIpTaskState.Metadata metadata) {
+                fail();
+            }
+
+            @Override
+            Tuple<Integer, String> indexChunks(
+                String name,
+                InputStream is,
+                int chunk,
+                MessageDigest digest,
+                String expectedMd5,
+                long start
+            ) {
+                assertSame(bais, is);
+                assertEquals(0, chunk);
+                indexedChunks.set(true);
+                return Tuple.tuple(11, expectedMd5);
+            }
+
+            @Override
+            void updateTaskState() {
+                assertEquals(0, state.getDatabases().get("test.mmdb").firstChunk());
+                assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk());
+            }
+
+            @Override
+            void deleteOldChunks(String name, int firstChunk) {
+                assertEquals("test.mmdb", name);
+                assertEquals(0, firstChunk);
+            }
+        };
+
+        geoIpDownloader.setState(EnterpriseGeoIpTaskState.EMPTY);
+        PasswordAuthentication auth = new PasswordAuthentication("name", "password".toCharArray());
+        String id = randomIdentifier();
+        DatabaseConfiguration databaseConfiguration = new DatabaseConfiguration(id, "test", new DatabaseConfiguration.Maxmind("name"));
+        geoIpDownloader.processDatabase(auth, databaseConfiguration);
+        assertThat(indexedChunks.get(), equalTo(true));
+    }
+
+    public void testProcessDatabaseUpdate() throws IOException {
+        ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]);
+        when(httpClient.get(any(), any())).thenReturn(bais);
+        AtomicBoolean indexedChunks = new AtomicBoolean(false);
+        geoIpDownloader = new EnterpriseGeoIpDownloader(
+            client,
+            httpClient,
+            clusterService,
+            threadPool,
+            1,
+            "",
+            "",
+            "",
+            EMPTY_TASK_ID,
+            Map.of(),
+            () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY),
+            (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray())
+        ) {
+            @Override
+            protected void updateTimestamp(String name, GeoIpTaskState.Metadata metadata) {
+                fail();
+            }
+
+            @Override
+            Tuple<Integer, String> indexChunks(
+                String name,
+                InputStream is,
+                int chunk,
+                MessageDigest digest,
+                String expectedMd5,
+                long start
+            ) {
+                assertSame(bais, is);
+                assertEquals(9, chunk);
+                indexedChunks.set(true);
+                return Tuple.tuple(1, expectedMd5);
+            }
+
+            @Override
+            void updateTaskState() {
+                assertEquals(9, state.getDatabases().get("test.mmdb").firstChunk());
+                assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk());
+            }
+
+            @Override
+            void deleteOldChunks(String name, int firstChunk) {
+                assertEquals("test.mmdb", name);
+                assertEquals(9, firstChunk);
+            }
+        };
+
+        geoIpDownloader.setState(EnterpriseGeoIpTaskState.EMPTY.put("test.mmdb", new GeoIpTaskState.Metadata(0, 5, 8, "0", 0)));
+        PasswordAuthentication auth = new PasswordAuthentication("name", "password".toCharArray());
+        String id = randomIdentifier();
+        DatabaseConfiguration databaseConfiguration = new DatabaseConfiguration(id, "test", new DatabaseConfiguration.Maxmind("name"));
+        geoIpDownloader.processDatabase(auth, databaseConfiguration);
+        assertThat(indexedChunks.get(), equalTo(true));
+    }
+
+    public void testProcessDatabaseSame() throws IOException {
+        GeoIpTaskState.Metadata metadata = new GeoIpTaskState.Metadata(
+            0,
+            4,
+            10,
+            "1",
+            0,
+            "e4a3411cdd7b21eaf18675da5a7f9f360d33c6882363b2c19c38715834c9e836"
+        );
+        EnterpriseGeoIpTaskState taskState = EnterpriseGeoIpTaskState.EMPTY.put("test.mmdb", metadata);
+        ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]);
+        when(httpClient.get(any(), any())).thenReturn(bais);
+
+        geoIpDownloader = new EnterpriseGeoIpDownloader(
+            client,
+            httpClient,
+            clusterService,
+            threadPool,
+            1,
+            "",
+            "",
+            "",
+            EMPTY_TASK_ID,
+            Map.of(),
+            () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY),
+            (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray())
+        ) {
+            @Override
+            protected void updateTimestamp(String name, GeoIpTaskState.Metadata newMetadata) {
+                assertEquals(metadata, newMetadata);
+                assertEquals("test.mmdb", name);
+            }
+
+            @Override
+            Tuple<Integer, String> indexChunks(
+                String name,
+                InputStream is,
+                int chunk,
+                MessageDigest digest,
+                String expectedChecksum,
+                long start
+            ) {
+                fail();
+                return Tuple.tuple(0, expectedChecksum);
+            }
+
+            @Override
+            void updateTaskState() {
+                fail();
+            }
+
+            @Override
+            void deleteOldChunks(String name, int firstChunk) {
+                fail();
+            }
+        };
+        geoIpDownloader.setState(taskState);
+        PasswordAuthentication auth = new PasswordAuthentication("name", "password".toCharArray());
+        String id = randomIdentifier();
+        DatabaseConfiguration databaseConfiguration = new DatabaseConfiguration(id, "test", new DatabaseConfiguration.Maxmind("name"));
+        geoIpDownloader.processDatabase(auth, databaseConfiguration);
+    }
+
+    public void testUpdateDatabasesWriteBlock() {
+        ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()));
+        var geoIpIndex = state.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName();
+        state = ClusterState.builder(state)
+            .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK))
+            .build();
+        when(clusterService.state()).thenReturn(state);
+        var e = expectThrows(ClusterBlockException.class, () -> geoIpDownloader.updateDatabases());
+        assertThat(
+            e.getMessage(),
+            equalTo(
+                "index ["
+                    + geoIpIndex
+                    + "] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, "
+                    + "index has read-only-allow-delete block];"
+            )
+        );
+        verifyNoInteractions(httpClient);
+    }
+
+    public void testUpdateDatabasesIndexNotReady() throws IOException {
+        ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()), true);
+        var geoIpIndex = state.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName();
+        state = ClusterState.builder(state)
+            .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK))
+            .build();
+        when(clusterService.state()).thenReturn(state);
+        geoIpDownloader.updateDatabases();
+        verifyNoInteractions(httpClient);
+    }
+
+    private GeoIpTaskState.Metadata newGeoIpTaskStateMetadata(boolean expired) {
+        Instant lastChecked;
+        if (expired) {
+            lastChecked = Instant.now().minus(randomIntBetween(31, 100), ChronoUnit.DAYS);
+        } else {
+            lastChecked = Instant.now().minus(randomIntBetween(0, 29), ChronoUnit.DAYS);
+        }
+        return new GeoIpTaskState.Metadata(0, 0, 0, randomAlphaOfLength(20), lastChecked.toEpochMilli());
+    }
+
+    private static class MockClient extends NoOpClient {
+
+        private final Map<ActionType<?>, BiConsumer<? extends ActionRequest, ? extends ActionListener<?>>> handlers = new HashMap<>();
+
+        private MockClient(ThreadPool threadPool) {
+            super(threadPool);
+        }
+
+        public <Response extends ActionResponse, Request extends ActionRequest> void addHandler(
+            ActionType<Response> action,
+            BiConsumer<Request, ActionListener<Response>> listener
+        ) {
+            handlers.put(action, listener);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
+            ActionType<Response> action,
+            Request request,
+            ActionListener<Response> listener
+        ) {
+            if (handlers.containsKey(action)) {
+                BiConsumer<ActionRequest, ActionListener<?>> biConsumer = (BiConsumer<ActionRequest, ActionListener<?>>) handlers.get(
+                    action
+                );
+                biConsumer.accept(request, listener);
+            } else {
+                throw new IllegalStateException("unexpected action called [" + action.name() + "]");
+            }
+        }
+    }
+}

+ 72 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskStateSerializationTests.java

@@ -0,0 +1,72 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class EnterpriseGeoIpTaskStateSerializationTests extends AbstractXContentSerializingTestCase<GeoIpTaskState> {
+    @Override
+    protected GeoIpTaskState doParseInstance(XContentParser parser) throws IOException {
+        return GeoIpTaskState.fromXContent(parser);
+    }
+
+    @Override
+    protected Writeable.Reader<GeoIpTaskState> instanceReader() {
+        return GeoIpTaskState::new;
+    }
+
+    @Override
+    protected GeoIpTaskState createTestInstance() {
+        GeoIpTaskState state = GeoIpTaskState.EMPTY;
+        int databaseCount = randomInt(20);
+        for (int i = 0; i < databaseCount; i++) {
+            state = state.put(randomAlphaOfLengthBetween(5, 10), createRandomMetadata());
+        }
+        return state;
+    }
+
+    @Override
+    protected GeoIpTaskState mutateInstance(GeoIpTaskState instance) {
+        Map<String, GeoIpTaskState.Metadata> databases = new HashMap<>(instance.getDatabases());
+        switch (between(0, 2)) {
+            case 0:
+                String databaseName = randomValueOtherThanMany(databases::containsKey, () -> randomAlphaOfLengthBetween(5, 10));
+                databases.put(databaseName, createRandomMetadata());
+                return new GeoIpTaskState(databases);
+            case 1:
+                if (databases.size() > 0) {
+                    String randomDatabaseName = databases.keySet().iterator().next();
+                    databases.put(randomDatabaseName, createRandomMetadata());
+                } else {
+                    databases.put(randomAlphaOfLengthBetween(5, 10), createRandomMetadata());
+                }
+                return new GeoIpTaskState(databases);
+            case 2:
+                if (databases.size() > 0) {
+                    String randomDatabaseName = databases.keySet().iterator().next();
+                    databases.remove(randomDatabaseName);
+                } else {
+                    databases.put(randomAlphaOfLengthBetween(5, 10), createRandomMetadata());
+                }
+                return new GeoIpTaskState(databases);
+            default:
+                throw new AssertionError("failure, got illegal switch case");
+        }
+    }
+
+    private GeoIpTaskState.Metadata createRandomMetadata() {
+        return new GeoIpTaskState.Metadata(randomLong(), randomInt(), randomInt(), randomAlphaOfLength(32), randomLong());
+    }
+}

+ 49 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java

@@ -426,6 +426,55 @@ public class GeoIpDownloaderTests extends ESTestCase {
         assertEquals(0, stats.getFailedDownloads());
     }
 
+    public void testCleanDatabases() throws IOException {
+        ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]);
+        when(httpClient.get("http://a.b/t1")).thenReturn(bais);
+
+        final AtomicInteger count = new AtomicInteger(0);
+
+        geoIpDownloader = new GeoIpDownloader(
+            client,
+            httpClient,
+            clusterService,
+            threadPool,
+            Settings.EMPTY,
+            1,
+            "",
+            "",
+            "",
+            EMPTY_TASK_ID,
+            Map.of(),
+            () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY),
+            () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY),
+            () -> true
+        ) {
+            @Override
+            void updateDatabases() throws IOException {
+                // noop
+            }
+
+            @Override
+            void deleteOldChunks(String name, int firstChunk) {
+                count.incrementAndGet();
+                assertEquals("test.mmdb", name);
+                assertEquals(21, firstChunk);
+            }
+
+            @Override
+            void updateTaskState() {
+                // noop
+            }
+        };
+
+        geoIpDownloader.setState(GeoIpTaskState.EMPTY.put("test.mmdb", new GeoIpTaskState.Metadata(10, 10, 20, "md5", 20)));
+        geoIpDownloader.runDownloader();
+        geoIpDownloader.runDownloader();
+        GeoIpDownloaderStats stats = geoIpDownloader.getStatus();
+        assertEquals(1, stats.getExpiredDatabases());
+        assertEquals(2, count.get()); // somewhat surprising, not necessarily wrong
+        assertEquals(18, geoIpDownloader.state.getDatabases().get("test.mmdb").lastCheck()); // highly surprising, seems wrong
+    }
+
     @SuppressWarnings("unchecked")
     public void testUpdateTaskState() {
         geoIpDownloader = new GeoIpDownloader(

+ 91 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java

@@ -0,0 +1,91 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata;
+import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class IngestGeoIpMetadataTests extends AbstractChunkedSerializingTestCase<IngestGeoIpMetadata> {
+    @Override
+    protected IngestGeoIpMetadata doParseInstance(XContentParser parser) throws IOException {
+        return IngestGeoIpMetadata.fromXContent(parser);
+    }
+
+    @Override
+    protected Writeable.Reader<IngestGeoIpMetadata> instanceReader() {
+        return IngestGeoIpMetadata::new;
+    }
+
+    @Override
+    protected IngestGeoIpMetadata createTestInstance() {
+        return randomIngestGeoIpMetadata();
+    }
+
+    @Override
+    protected IngestGeoIpMetadata mutateInstance(IngestGeoIpMetadata instance) throws IOException {
+        Map<String, DatabaseConfigurationMetadata> databases = new HashMap<>(instance.getDatabases());
+        switch (between(0, 2)) {
+            case 0 -> {
+                String databaseId = randomValueOtherThanMany(databases::containsKey, ESTestCase::randomIdentifier);
+                databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId));
+                return new IngestGeoIpMetadata(databases);
+            }
+            case 1 -> {
+                if (databases.size() > 0) {
+                    String randomDatabaseId = databases.keySet().iterator().next();
+                    databases.put(randomDatabaseId, randomDatabaseConfigurationMetadata(randomDatabaseId));
+                } else {
+                    String databaseId = randomIdentifier();
+                    databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId));
+                }
+                return new IngestGeoIpMetadata(databases);
+            }
+            case 2 -> {
+                if (databases.size() > 0) {
+                    String randomDatabaseId = databases.keySet().iterator().next();
+                    databases.remove(randomDatabaseId);
+                } else {
+                    String databaseId = randomIdentifier();
+                    databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId));
+                }
+                return new IngestGeoIpMetadata(databases);
+            }
+            default -> throw new AssertionError("failure, got illegal switch case");
+        }
+    }
+
+    private IngestGeoIpMetadata randomIngestGeoIpMetadata() {
+        Map<String, DatabaseConfigurationMetadata> databases = new HashMap<>();
+        for (int i = 0; i < randomIntBetween(0, 20); i++) {
+            String databaseId = randomIdentifier();
+            databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId));
+        }
+        return new IngestGeoIpMetadata(databases);
+    }
+
+    private DatabaseConfigurationMetadata randomDatabaseConfigurationMetadata(String id) {
+        return new DatabaseConfigurationMetadata(
+            randomDatabaseConfiguration(id),
+            randomNonNegativeLong(),
+            randomPositiveTimeValue().millis()
+        );
+    }
+
+    private DatabaseConfiguration randomDatabaseConfiguration(String id) {
+        return new DatabaseConfiguration(id, randomAlphaOfLength(10), new DatabaseConfiguration.Maxmind(randomAlphaOfLength(10)));
+    }
+}

+ 74 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java

@@ -0,0 +1,74 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+
+import static org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.MAXMIND_NAMES;
+import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationTests.randomDatabaseConfiguration;
+
+public class DatabaseConfigurationMetadataTests extends AbstractXContentSerializingTestCase<DatabaseConfigurationMetadata> {
+
+    private String id;
+
+    @Override
+    protected DatabaseConfigurationMetadata doParseInstance(XContentParser parser) throws IOException {
+        return DatabaseConfigurationMetadata.parse(parser, id);
+    }
+
+    @Override
+    protected DatabaseConfigurationMetadata createTestInstance() {
+        id = randomAlphaOfLength(5);
+        return randomDatabaseConfigurationMetadata(id);
+    }
+
+    public static DatabaseConfigurationMetadata randomDatabaseConfigurationMetadata(String id) {
+        return new DatabaseConfigurationMetadata(
+            new DatabaseConfiguration(id, randomFrom(MAXMIND_NAMES), new DatabaseConfiguration.Maxmind(randomAlphaOfLength(5))),
+            randomNonNegativeLong(),
+            randomPositiveTimeValue().millis()
+        );
+    }
+
+    @Override
+    protected DatabaseConfigurationMetadata mutateInstance(DatabaseConfigurationMetadata instance) {
+        switch (between(0, 2)) {
+            case 0:
+                return new DatabaseConfigurationMetadata(
+                    randomValueOtherThan(instance.database(), () -> randomDatabaseConfiguration(randomAlphaOfLength(5))),
+                    instance.version(),
+                    instance.modifiedDate()
+                );
+            case 1:
+                return new DatabaseConfigurationMetadata(
+                    instance.database(),
+                    randomValueOtherThan(instance.version(), ESTestCase::randomNonNegativeLong),
+                    instance.modifiedDate()
+                );
+            case 2:
+                return new DatabaseConfigurationMetadata(
+                    instance.database(),
+                    instance.version(),
+                    randomValueOtherThan(instance.modifiedDate(), () -> ESTestCase.randomPositiveTimeValue().millis())
+                );
+            default:
+                throw new AssertionError("failure, got illegal switch case");
+        }
+    }
+
+    @Override
+    protected Writeable.Reader<DatabaseConfigurationMetadata> instanceReader() {
+        return DatabaseConfigurationMetadata::new;
+    }
+}

+ 86 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java

@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.Maxmind;
+import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.MAXMIND_NAMES;
+
+public class DatabaseConfigurationTests extends AbstractXContentSerializingTestCase<DatabaseConfiguration> {
+
+    private String id;
+
+    @Override
+    protected DatabaseConfiguration doParseInstance(XContentParser parser) throws IOException {
+        return DatabaseConfiguration.parse(parser, id);
+    }
+
+    @Override
+    protected DatabaseConfiguration createTestInstance() {
+        id = randomAlphaOfLength(5);
+        return randomDatabaseConfiguration(id);
+    }
+
+    public static DatabaseConfiguration randomDatabaseConfiguration(String id) {
+        return new DatabaseConfiguration(id, randomFrom(MAXMIND_NAMES), new Maxmind(randomAlphaOfLength(5)));
+    }
+
+    @Override
+    protected DatabaseConfiguration mutateInstance(DatabaseConfiguration instance) {
+        switch (between(0, 2)) {
+            case 0:
+                return new DatabaseConfiguration(instance.id() + randomAlphaOfLength(2), instance.name(), instance.maxmind());
+            case 1:
+                return new DatabaseConfiguration(
+                    instance.id(),
+                    randomValueOtherThan(instance.name(), () -> randomFrom(MAXMIND_NAMES)),
+                    instance.maxmind()
+                );
+            case 2:
+                return new DatabaseConfiguration(
+                    instance.id(),
+                    instance.name(),
+                    new Maxmind(instance.maxmind().accountId() + randomAlphaOfLength(2))
+                );
+            default:
+                throw new AssertionError("failure, got illegal switch case");
+        }
+    }
+
+    @Override
+    protected Writeable.Reader<DatabaseConfiguration> instanceReader() {
+        return DatabaseConfiguration::new;
+    }
+
+    public void testValidateId() {
+        Set<String> invalidIds = Set.of("-foo", "_foo", "foo,bar", "foo bar", "foo*bar", "foo.bar");
+        for (String id : invalidIds) {
+            expectThrows(IllegalArgumentException.class, "expected exception for " + id, () -> DatabaseConfiguration.validateId(id));
+        }
+        Set<String> validIds = Set.of("f-oo", "f_oo", "foobar");
+        for (String id : validIds) {
+            DatabaseConfiguration.validateId(id);
+        }
+        // Note: the code checks for byte length, but randomAlphaOfLength only uses characters from the ASCII subset
+        String longId = randomAlphaOfLength(128);
+        expectThrows(IllegalArgumentException.class, "expected exception for " + longId, () -> DatabaseConfiguration.validateId(longId));
+        String longestAllowedId = randomAlphaOfLength(127);
+        DatabaseConfiguration.validateId(longestAllowedId);
+        String shortId = randomAlphaOfLengthBetween(1, 127);
+        DatabaseConfiguration.validateId(shortId);
+        expectThrows(IllegalArgumentException.class, "expected exception for empty string", () -> DatabaseConfiguration.validateId(""));
+        expectThrows(IllegalArgumentException.class, "expected exception for null string", () -> DatabaseConfiguration.validateId(null));
+    }
+}
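
For illustration, the id constraints exercised by testValidateId above could be captured by a validation along these lines. This is a hypothetical sketch, not the actual DatabaseConfiguration.validateId; the class name, error messages, and exact character set are assumptions drawn from the assertions in the test:

    import java.nio.charset.StandardCharsets;

    public final class IdValidationSketch {
        // Mirrors the assertions above: non-null, non-empty, no leading '-' or '_',
        // none of ',', ' ', '*', '.', and at most 127 bytes of UTF-8.
        static void validateId(String id) {
            if (id == null || id.isEmpty()) {
                throw new IllegalArgumentException("invalid database configuration id [" + id + "]: must be non-empty");
            }
            if (id.startsWith("-") || id.startsWith("_")) {
                throw new IllegalArgumentException("invalid database configuration id [" + id + "]: must not start with '-' or '_'");
            }
            for (char c : new char[] { ',', ' ', '*', '.' }) {
                if (id.indexOf(c) >= 0) {
                    throw new IllegalArgumentException("invalid database configuration id [" + id + "]: must not contain '" + c + "'");
                }
            }
            if (id.getBytes(StandardCharsets.UTF_8).length > 127) {
                throw new IllegalArgumentException("invalid database configuration id [" + id + "]: must not exceed 127 bytes");
            }
        }
    }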

+ 69 - 0
modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java

@@ -0,0 +1,69 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest.geoip.direct;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class TransportPutDatabaseConfigurationActionTests extends ESTestCase {
+
+    public void testValidatePrerequisites() {
+        // Test that we reject two configurations with the same database name but different ids:
+        String name = randomAlphaOfLengthBetween(1, 50);
+        IngestGeoIpMetadata ingestGeoIpMetadata = randomIngestGeoIpMetadata(name);
+        ClusterState state = ClusterState.builder(ClusterState.EMPTY_STATE)
+            .metadata(Metadata.builder(Metadata.EMPTY_METADATA).putCustom(IngestGeoIpMetadata.TYPE, ingestGeoIpMetadata))
+            .build();
+        DatabaseConfiguration databaseConfiguration = randomDatabaseConfiguration(randomIdentifier(), name);
+        expectThrows(
+            IllegalArgumentException.class,
+            () -> TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfiguration, state)
+        );
+
+        // Test that we do not reject two configurations with different database names:
+        String differentName = randomValueOtherThan(name, () -> randomAlphaOfLengthBetween(1, 50));
+        DatabaseConfiguration databaseConfigurationForDifferentName = randomDatabaseConfiguration(randomIdentifier(), differentName);
+        TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfigurationForDifferentName, state);
+
+        // Test that we do not reject a configuration if none already exists:
+        TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfiguration, ClusterState.EMPTY_STATE);
+
+        // Test that we do not reject a configuration if one with the same database name AND id already exists:
+        DatabaseConfiguration databaseConfigurationSameNameSameId = ingestGeoIpMetadata.getDatabases()
+            .values()
+            .iterator()
+            .next()
+            .database();
+        TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfigurationSameNameSameId, state);
+    }
+
+    private IngestGeoIpMetadata randomIngestGeoIpMetadata(String name) {
+        Map<String, DatabaseConfigurationMetadata> databases = new HashMap<>();
+        String databaseId = randomIdentifier();
+        databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId, name));
+        return new IngestGeoIpMetadata(databases);
+    }
+
+    private DatabaseConfigurationMetadata randomDatabaseConfigurationMetadata(String id, String name) {
+        return new DatabaseConfigurationMetadata(
+            randomDatabaseConfiguration(id, name),
+            randomNonNegativeLong(),
+            randomPositiveTimeValue().millis()
+        );
+    }
+
+    private DatabaseConfiguration randomDatabaseConfiguration(String id, String name) {
+        return new DatabaseConfiguration(id, name, new DatabaseConfiguration.Maxmind(randomAlphaOfLength(10)));
+    }
+}
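
The rule that testValidatePrerequisites pins down, namely that a database name may be claimed by at most one configuration id, boils down to a check like the following. This is a hedged sketch against a plain map; the real TransportPutDatabaseConfigurationAction.validatePrerequisites reads the configurations out of the cluster state, and the class name here is illustrative:

    import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;
    import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata;

    import java.util.Map;

    public final class PrerequisiteSketch {
        static void validatePrerequisites(DatabaseConfiguration newConfig, Map<String, DatabaseConfigurationMetadata> existing) {
            for (DatabaseConfigurationMetadata metadata : existing.values()) {
                DatabaseConfiguration other = metadata.database();
                // same database name under a different id is a conflict; same name AND same id is just an update
                if (other.name().equals(newConfig.name()) && other.id().equals(newConfig.id()) == false) {
                    throw new IllegalArgumentException(
                        "database [" + newConfig.name() + "] is already configured by id [" + other.id() + "]"
                    );
                }
            }
        }
    }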

+ 5 - 0
modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java

@@ -46,7 +46,12 @@ public class IngestGeoIpClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase
         .module("reindex")
         .module("ingest-geoip")
         .systemProperty("ingest.geoip.downloader.enabled.default", "true")
+        // sets the plain (geoip.elastic.co) downloader endpoint, which is used in these tests
         .setting("ingest.geoip.downloader.endpoint", () -> fixture.getAddress(), s -> useFixture)
+        // also sets the enterprise downloader maxmind endpoint, to make sure we do not accidentally hit the real endpoint from tests
+        // note: it's not important that the downloading actually work at this point -- the rest tests (so far) don't exercise
+        // the downloading code for licensing reasons -- but if they did, then it would be important that we're hitting a fixture
+        .systemProperty("ingest.geoip.downloader.maxmind.endpoint.default", () -> fixture.getAddress(), s -> useFixture)
         .build();
 
     @ClassRule

+ 72 - 0
modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml

@@ -0,0 +1,72 @@
+setup:
+  - requires:
+      cluster_features: ["geoip.downloader.database.configuration"]
+      reason: "geoip downloader database configuration APIs added in 8.15"
+
+---
+"Test adding, getting, and removing geoip databases":
+  - do:
+      ingest.put_geoip_database:
+        id: "my_database_1"
+        body:  >
+          {
+            "name": "GeoIP2-City",
+            "maxmind": {
+              "account_id": "1234"
+            }
+          }
+  - match: { acknowledged: true }
+
+  - do:
+      ingest.put_geoip_database:
+        id: "my_database_1"
+        body:  >
+          {
+            "name": "GeoIP2-Country",
+            "maxmind": {
+              "account_id": "4321"
+            }
+          }
+  - match: { acknowledged: true }
+
+  - do:
+      ingest.put_geoip_database:
+        id: "my_database_2"
+        body:  >
+          {
+            "name": "GeoIP2-City",
+            "maxmind": {
+              "account_id": "1234"
+            }
+          }
+  - match: { acknowledged: true }
+
+  - do:
+      ingest.get_geoip_database:
+        id: "my_database_1"
+  - length: { databases: 1 }
+  - match: { databases.0.id: "my_database_1" }
+  - gte: { databases.0.modified_date_millis: 0 }
+  - match: { databases.0.database.name: "GeoIP2-Country" }
+  - match: { databases.0.database.maxmind.account_id: "4321" }
+
+  - do:
+      ingest.get_geoip_database: {}
+  - length: { databases: 2 }
+
+  - do:
+      ingest.get_geoip_database:
+        id: "my_database_1,my_database_2"
+  - length: { databases: 2 }
+
+  - do:
+      ingest.delete_geoip_database:
+        id: "my_database_1"
+
+  - do:
+      ingest.get_geoip_database: {}
+  - length: { databases: 1 }
+  - match: { databases.0.id: "my_database_2" }
+  - gte: { databases.0.modified_date_millis: 0 }
+  - match: { databases.0.database.name: "GeoIP2-City" }
+  - match: { databases.0.database.maxmind.account_id: "1234" }
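
Outside the YAML suite, the same add/get/remove round trip can be driven with the low-level Java REST client. The paths come from the API specs that follow; the host, port, and class name here are placeholders:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.RestClient;

    public class GeoipDatabaseConfigExample {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // create (or overwrite) a database configuration
                Request put = new Request("PUT", "/_ingest/geoip/database/my_database_1");
                put.setJsonEntity("{\"name\": \"GeoIP2-City\", \"maxmind\": {\"account_id\": \"1234\"}}");
                client.performRequest(put);

                // fetch it back; omit the id, or pass a comma-separated list, to fetch several
                client.performRequest(new Request("GET", "/_ingest/geoip/database/my_database_1"));

                // and remove it again
                client.performRequest(new Request("DELETE", "/_ingest/geoip/database/my_database_1"));
            }
        }
    }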

+ 31 - 0
rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json

@@ -0,0 +1,31 @@
+{
+  "ingest.delete_geoip_database":{
+    "documentation":{
+      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html",
+      "description":"Deletes a geoip database configuration"
+    },
+    "stability":"stable",
+    "visibility":"public",
+    "headers":{
+      "accept": [ "application/json"]
+    },
+    "url":{
+      "paths":[
+        {
+          "path":"/_ingest/geoip/database/{id}",
+          "methods":[
+            "DELETE"
+          ],
+          "parts":{
+            "id":{
+              "type":"list",
+              "description":"A comma-separated list of geoip database configurations to delete"
+            }
+          }
+        }
+      ]
+    },
+    "params":{
+    }
+  }
+}

+ 37 - 0
rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_geoip_database.json

@@ -0,0 +1,37 @@
+{
+  "ingest.get_geoip_database":{
+    "documentation":{
+      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html",
+      "description":"Returns geoip database configuration."
+    },
+    "stability":"stable",
+    "visibility":"public",
+    "headers":{
+      "accept": [ "application/json"]
+    },
+    "url":{
+      "paths":[
+        {
+          "path":"/_ingest/geoip/database",
+          "methods":[
+            "GET"
+          ]
+        },
+        {
+          "path":"/_ingest/geoip/database/{id}",
+          "methods":[
+            "GET"
+          ],
+          "parts":{
+            "id":{
+              "type":"list",
+              "description":"A comma-separated list of geoip database configurations to get; use `*` to get all geoip database configurations"
+            }
+          }
+        }
+      ]
+    },
+    "params":{
+    }
+  }
+}

+ 35 - 0
rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json

@@ -0,0 +1,35 @@
+{
+  "ingest.put_geoip_database":{
+    "documentation":{
+      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html",
+      "description":"Puts the configuration for a geoip database to be downloaded"
+    },
+    "stability":"stable",
+    "visibility":"public",
+    "headers":{
+      "accept": [ "application/json"]
+    },
+    "url":{
+      "paths":[
+        {
+          "path":"/_ingest/geoip/database/{id}",
+          "methods":[
+            "PUT"
+          ],
+          "parts":{
+            "id":{
+              "type":"string",
+              "description":"The id of the database configuration"
+            }
+          }
+        }
+      ]
+    },
+    "params":{
+    },
+    "body":{
+      "description":"The database configuration definition",
+      "required":true
+    }
+  }
+}

+ 2 - 0
server/src/main/java/module-info.java

@@ -429,6 +429,7 @@ module org.elasticsearch.server {
             org.elasticsearch.indices.IndicesFeatures,
             org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures,
             org.elasticsearch.index.mapper.MapperFeatures,
+            org.elasticsearch.ingest.IngestGeoIpFeatures,
             org.elasticsearch.search.SearchFeatures,
             org.elasticsearch.script.ScriptFeatures,
             org.elasticsearch.search.retriever.RetrieversFeatures,
@@ -462,4 +463,5 @@ module org.elasticsearch.server {
             org.elasticsearch.serverless.shardhealth,
             org.elasticsearch.serverless.apifiltering;
     exports org.elasticsearch.lucene.spatial;
+
 }

+ 1 - 0
server/src/main/java/org/elasticsearch/TransportVersions.java

@@ -214,6 +214,7 @@ public class TransportVersions {
     public static final TransportVersion INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN = def(8_705_00_0);
     public static final TransportVersion ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED = def(8_706_00_0);
     public static final TransportVersion ENRICH_CACHE_STATS_SIZE_ADDED = def(8_707_00_0);
+    public static final TransportVersion ENTERPRISE_GEOIP_DOWNLOADER = def(8_708_00_0);
 
     /*
      * STOP! READ THIS FIRST! No, really,

+ 86 - 0
server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java

@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.features.NodeFeature;
+import org.elasticsearch.persistent.PersistentTaskParams;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * As a relatively minor hack, this class holds the string constant that defines both the id
+ * and the name of the task for the new ip geolocation database downloader feature. It also provides the
+ * PersistentTaskParams that are necessary to start the task and to run it.
+ * <p>
+ * Defining this in Elasticsearch itself gives us a reasonably tidy version of things where we don't
+ * end up with strange inter-module dependencies. It's not ideal, but it works fine.
+ */
+public final class EnterpriseGeoIpTask {
+
+    private EnterpriseGeoIpTask() {
+        // utility class
+    }
+
+    public static final String ENTERPRISE_GEOIP_DOWNLOADER = "enterprise-geoip-downloader";
+    public static final NodeFeature GEOIP_DOWNLOADER_DATABASE_CONFIGURATION = new NodeFeature("geoip.downloader.database.configuration");
+
+    public static class EnterpriseGeoIpTaskParams implements PersistentTaskParams {
+
+        public static final ObjectParser<EnterpriseGeoIpTaskParams, Void> PARSER = new ObjectParser<>(
+            ENTERPRISE_GEOIP_DOWNLOADER,
+            true,
+            EnterpriseGeoIpTaskParams::new
+        );
+
+        public EnterpriseGeoIpTaskParams() {}
+
+        public EnterpriseGeoIpTaskParams(StreamInput in) {}
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.endObject();
+            return builder;
+        }
+
+        @Override
+        public String getWriteableName() {
+            return ENTERPRISE_GEOIP_DOWNLOADER;
+        }
+
+        @Override
+        public TransportVersion getMinimalSupportedVersion() {
+            return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) {}
+
+        public static EnterpriseGeoIpTaskParams fromXContent(XContentParser parser) {
+            return PARSER.apply(parser, null);
+        }
+
+        @Override
+        public int hashCode() {
+            return 0;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            return obj instanceof EnterpriseGeoIpTaskParams;
+        }
+    }
+}

+ 22 - 0
server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java

@@ -0,0 +1,22 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest;
+
+import org.elasticsearch.features.FeatureSpecification;
+import org.elasticsearch.features.NodeFeature;
+
+import java.util.Set;
+
+import static org.elasticsearch.ingest.EnterpriseGeoIpTask.GEOIP_DOWNLOADER_DATABASE_CONFIGURATION;
+
+public class IngestGeoIpFeatures implements FeatureSpecification {
+    public Set<NodeFeature> getFeatures() {
+        return Set.of(GEOIP_DOWNLOADER_DATABASE_CONFIGURATION);
+    }
+}

+ 1 - 0
server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification

@@ -14,6 +14,7 @@ org.elasticsearch.rest.RestFeatures
 org.elasticsearch.indices.IndicesFeatures
 org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures
 org.elasticsearch.index.mapper.MapperFeatures
+org.elasticsearch.ingest.IngestGeoIpFeatures
 org.elasticsearch.search.SearchFeatures
 org.elasticsearch.search.retriever.RetrieversFeatures
 org.elasticsearch.script.ScriptFeatures

+ 3 - 3
server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java

@@ -1826,9 +1826,9 @@ public class IngestServiceTests extends ESTestCase {
         for (int i = 0; i < numRequest; i++) {
             IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none");
             indexRequest.source(xContentType, "field1", "value1");
-            boolean shouldListExecutedPiplines = randomBoolean();
-            executedPipelinesExpected.add(shouldListExecutedPiplines);
-            indexRequest.setListExecutedPipelines(shouldListExecutedPiplines);
+            boolean shouldListExecutedPipelines = randomBoolean();
+            executedPipelinesExpected.add(shouldListExecutedPipelines);
+            indexRequest.setListExecutedPipelines(shouldListExecutedPipelines);
             bulkRequest.add(indexRequest);
         }
 

+ 125 - 0
test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java

@@ -0,0 +1,125 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package fixture.geoip;
+
+import com.sun.net.httpserver.HttpServer;
+
+import org.elasticsearch.common.hash.MessageDigests;
+import org.junit.rules.ExternalResource;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.UncheckedIOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.security.MessageDigest;
+
+/**
+ * This fixture simulates a maxmind-provided server from which the EnterpriseGeoIpDownloader can download maxmind geoip
+ * database files. It can be used by integration tests so that they don't actually hit maxmind servers.
+ */
+public class EnterpriseGeoIpHttpFixture extends ExternalResource {
+
+    private final Path source;
+    private final boolean enabled;
+    private final String[] databaseTypes;
+    private HttpServer server;
+
+    /*
+     * The values in databaseTypes must be in DatabaseConfiguration.MAXMIND_NAMES, and must be one of the databases copied in the
+     * copyFiles method of this class.
+     */
+    public EnterpriseGeoIpHttpFixture(boolean enabled, String... databaseTypes) {
+        this.enabled = enabled;
+        this.databaseTypes = databaseTypes;
+        try {
+            this.source = Files.createTempDirectory("source");
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+    }
+
+    public String getAddress() {
+        return "http://" + server.getAddress().getHostString() + ":" + server.getAddress().getPort() + "/";
+    }
+
+    @Override
+    protected void before() throws Throwable {
+        if (enabled) {
+            copyFiles();
+            this.server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+
+            // for expediency reasons, it is handy to have this test fixture serve a dual purpose: it stubs out the download
+            // protocol for downloading files from maxmind (see the looped context creation after this stanza), and it also
+            // serves an empty response for the geoip.elastic.co service here
+            this.server.createContext("/", exchange -> {
+                String response = "[]"; // an empty json array
+                exchange.sendResponseHeaders(200, response.length());
+                try (OutputStream os = exchange.getResponseBody()) {
+                    os.write(response.getBytes(StandardCharsets.UTF_8));
+                }
+            });
+
+            // register the file types for the download fixture
+            for (String databaseType : databaseTypes) {
+                createContextForEnterpriseDatabase(databaseType);
+            }
+
+            server.start();
+        }
+    }
+
+    private void createContextForEnterpriseDatabase(String databaseType) {
+        this.server.createContext("/" + databaseType + "/download", exchange -> {
+            exchange.sendResponseHeaders(200, 0);
+            if (exchange.getRequestURI().toString().contains("sha256")) {
+                MessageDigest sha256 = MessageDigests.sha256();
+                try (InputStream inputStream = GeoIpHttpFixture.class.getResourceAsStream("/geoip-fixture/" + databaseType + ".tgz")) {
+                    sha256.update(inputStream.readAllBytes());
+                }
+                exchange.getResponseBody()
+                    .write(
+                        (MessageDigests.toHexString(sha256.digest()) + "  " + databaseType + "_20240709.tar.gz").getBytes(
+                            StandardCharsets.UTF_8
+                        )
+                    );
+            } else {
+                try (
+                    OutputStream outputStream = exchange.getResponseBody();
+                    InputStream inputStream = GeoIpHttpFixture.class.getResourceAsStream("/geoip-fixture/" + databaseType + ".tgz")
+                ) {
+                    inputStream.transferTo(outputStream);
+                }
+            }
+            exchange.getResponseBody().close();
+        });
+    }
+
+    @Override
+    protected void after() {
+        if (enabled) {
+            server.stop(0);
+        }
+    }
+
+    private void copyFiles() throws Exception {
+        for (String databaseType : databaseTypes) {
+            Files.copy(
+                GeoIpHttpFixture.class.getResourceAsStream("/geoip-fixture/GeoIP2-City.tgz"),
+                source.resolve(databaseType + ".tgz"),
+                StandardCopyOption.REPLACE_EXISTING
+            );
+        }
+    }
+}
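
The sha256 context above serves a digest in the conventional "<hex>  <filename>" checksum format. A client of this fixture could verify a downloaded tarball against it roughly as follows; this is a hedged sketch, with class and method names that are illustrative rather than the EnterpriseGeoIpDownloader's actual verification code:

    import org.elasticsearch.common.hash.MessageDigests;

    import java.security.MessageDigest;

    public final class ChecksumSketch {
        static boolean matches(byte[] tarball, String sha256Line) {
            // the first whitespace-separated token is the hex digest; the remainder is the file name
            String expectedHex = sha256Line.split("\\s+")[0];
            MessageDigest sha256 = MessageDigests.sha256();
            sha256.update(tarball);
            return MessageDigests.toHexString(sha256.digest()).equals(expectedHex);
        }
    }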

BIN
test/fixtures/geoip-fixture/src/main/resources/geoip-fixture/GeoIP2-City.tgz


+ 1 - 0
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java

@@ -87,6 +87,7 @@ public final class XPackField {
 
     /** Name constant for the redact processor feature. */
     public static final String REDACT_PROCESSOR = "redact_processor";
+    public static final String ENTERPRISE_GEOIP_DOWNLOADER = "enterprise_geoip_downloader";
     /** Name for Universal Profiling. */
     public static final String UNIVERSAL_PROFILING = "universal_profiling";
 

+ 19 - 0
x-pack/plugin/geoip-enterprise-downloader/build.gradle

@@ -0,0 +1,19 @@
+apply plugin: 'elasticsearch.internal-es-plugin'
+apply plugin: 'elasticsearch.internal-yaml-rest-test'
+apply plugin: 'elasticsearch.internal-cluster-test'
+esplugin {
+  name 'x-pack-geoip-enterprise-downloader'
+  description 'Elasticsearch Expanded Pack Plugin - Geoip Enterprise Downloader'
+  classname 'org.elasticsearch.xpack.geoip.EnterpriseDownloaderPlugin'
+  extendedPlugins = ['x-pack-core']
+}
+base {
+  archivesName = 'x-pack-geoip-enterprise-downloader'
+}
+
+dependencies {
+  compileOnly project(path: xpackModule('core'))
+  testImplementation(testArtifact(project(xpackModule('core'))))
+}
+
+addQaCheckDependencies(project)

+ 48 - 0
x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseDownloaderPlugin.java

@@ -0,0 +1,48 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+package org.elasticsearch.xpack.geoip;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.xpack.core.XPackPlugin;
+
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * This plugin is used to start the enterprise geoip downloader task (See {@link org.elasticsearch.ingest.EnterpriseGeoIpTask}). That task
+ * requires having a platinum license. But the geoip code is in a non-xpack module that doesn't know about licensing. This plugin has a
+ * license listener that will start the task if the license is valid, and will stop the task if it becomes invalid. This lets us enforce
+ * the license without having to either put license logic into a non-xpack module, or put a lot of shared geoip code (much of which does
+ * not require a platinum license) into xpack.
+ */
+public class EnterpriseDownloaderPlugin extends Plugin {
+
+    private final Settings settings;
+    private EnterpriseGeoIpDownloaderLicenseListener enterpriseGeoIpDownloaderLicenseListener;
+
+    public EnterpriseDownloaderPlugin(final Settings settings) {
+        this.settings = settings;
+    }
+
+    protected XPackLicenseState getLicenseState() {
+        return XPackPlugin.getSharedLicenseState();
+    }
+
+    @Override
+    public Collection<?> createComponents(PluginServices services) {
+        enterpriseGeoIpDownloaderLicenseListener = new EnterpriseGeoIpDownloaderLicenseListener(
+            services.client(),
+            services.clusterService(),
+            services.threadPool(),
+            getLicenseState()
+        );
+        enterpriseGeoIpDownloaderLicenseListener.init();
+        return List.of(enterpriseGeoIpDownloaderLicenseListener);
+    }
+}

+ 145 - 0
x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java

@@ -0,0 +1,145 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.geoip;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.ResourceAlreadyExistsException;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.UpdateForV9;
+import org.elasticsearch.ingest.EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams;
+import org.elasticsearch.license.License;
+import org.elasticsearch.license.LicenseStateListener;
+import org.elasticsearch.license.LicensedFeature;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.elasticsearch.xpack.core.XPackField;
+
+import java.util.Objects;
+
+import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER;
+
+public class EnterpriseGeoIpDownloaderLicenseListener implements LicenseStateListener, ClusterStateListener {
+    private static final Logger logger = LogManager.getLogger(EnterpriseGeoIpDownloaderLicenseListener.class);
+    // Note: This custom type is IngestGeoIpMetadata.TYPE, but that class is not exposed to this plugin
+    static final String INGEST_GEOIP_CUSTOM_METADATA_TYPE = "ingest_geoip";
+
+    private final PersistentTasksService persistentTasksService;
+    private final ClusterService clusterService;
+    private final XPackLicenseState licenseState;
+    private static final LicensedFeature.Momentary ENTERPRISE_GEOIP_FEATURE = LicensedFeature.momentary(
+        null,
+        XPackField.ENTERPRISE_GEOIP_DOWNLOADER,
+        License.OperationMode.PLATINUM
+    );
+    private volatile boolean licenseIsValid = false;
+    private volatile boolean hasIngestGeoIpMetadata = false;
+
+    protected EnterpriseGeoIpDownloaderLicenseListener(
+        Client client,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        XPackLicenseState licenseState
+    ) {
+        this.persistentTasksService = new PersistentTasksService(clusterService, threadPool, client);
+        this.clusterService = clusterService;
+        this.licenseState = licenseState;
+    }
+
+    @UpdateForV9 // use MINUS_ONE once that means no timeout
+    private static final TimeValue MASTER_TIMEOUT = TimeValue.MAX_VALUE;
+    private volatile boolean licenseStateListenerRegistered;
+
+    public void init() {
+        listenForLicenseStateChanges();
+        clusterService.addListener(this);
+    }
+
+    void listenForLicenseStateChanges() {
+        assert licenseStateListenerRegistered == false : "listenForLicenseStateChanges() should only be called once";
+        licenseStateListenerRegistered = true;
+        licenseState.addListener(this);
+    }
+
+    @Override
+    public void licenseStateChanged() {
+        licenseIsValid = ENTERPRISE_GEOIP_FEATURE.checkWithoutTracking(licenseState);
+        maybeUpdateTaskState(clusterService.state());
+    }
+
+    @Override
+    public void clusterChanged(ClusterChangedEvent event) {
+        hasIngestGeoIpMetadata = event.state().metadata().custom(INGEST_GEOIP_CUSTOM_METADATA_TYPE) != null;
+        final boolean ingestGeoIpCustomMetaChangedInEvent = event.metadataChanged()
+            && event.changedCustomMetadataSet().contains(INGEST_GEOIP_CUSTOM_METADATA_TYPE);
+        final boolean masterNodeChanged = Objects.equals(
+            event.state().nodes().getMasterNode(),
+            event.previousState().nodes().getMasterNode()
+        ) == false;
+        /*
+         * We don't want to potentially start the task on every cluster state change, so only maybeUpdateTaskState if this cluster change
+         * event involved the modification of custom geoip metadata OR a master node change
+         */
+        if (ingestGeoIpCustomMetaChangedInEvent || (masterNodeChanged && hasIngestGeoIpMetadata)) {
+            maybeUpdateTaskState(event.state());
+        }
+    }
+
+    private void maybeUpdateTaskState(ClusterState state) {
+        // We should only start/stop task from single node, master is the best as it will go through it anyway
+        if (state.nodes().isLocalNodeElectedMaster()) {
+            if (licenseIsValid) {
+                if (hasIngestGeoIpMetadata) {
+                    ensureTaskStarted();
+                }
+            } else {
+                ensureTaskStopped();
+            }
+        }
+    }
+
+    private void ensureTaskStarted() {
+        assert licenseIsValid : "Task should never be started without valid license";
+        persistentTasksService.sendStartRequest(
+            ENTERPRISE_GEOIP_DOWNLOADER,
+            ENTERPRISE_GEOIP_DOWNLOADER,
+            new EnterpriseGeoIpTaskParams(),
+            MASTER_TIMEOUT,
+            ActionListener.wrap(r -> logger.debug("Started enterprise geoip downloader task"), e -> {
+                Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e;
+                if (t instanceof ResourceAlreadyExistsException == false) {
+                    logger.error("failed to create enterprise geoip downloader task", e);
+                }
+            })
+        );
+    }
+
+    private void ensureTaskStopped() {
+        ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener = ActionListener.wrap(
+            r -> logger.debug("Stopped enterprise geoip downloader task"),
+            e -> {
+                Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e;
+                if (t instanceof ResourceNotFoundException == false) {
+                    logger.error("failed to remove enterprise geoip downloader task", e);
+                }
+            }
+        );
+        persistentTasksService.sendRemoveRequest(ENTERPRISE_GEOIP_DOWNLOADER, MASTER_TIMEOUT, listener);
+    }
+}

+ 219 - 0
x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java

@@ -0,0 +1,219 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.geoip;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.license.License;
+import org.elasticsearch.license.TestUtils;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.license.internal.XPackLicenseStatus;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.RemovePersistentTaskAction;
+import org.elasticsearch.persistent.StartPersistentTaskAction;
+import org.elasticsearch.telemetry.metric.MeterRegistry;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpClient;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Before;
+
+import java.util.Map;
+import java.util.UUID;
+
+import static org.elasticsearch.xpack.geoip.EnterpriseGeoIpDownloaderLicenseListener.INGEST_GEOIP_CUSTOM_METADATA_TYPE;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class EnterpriseGeoIpDownloaderLicenseListenerTests extends ESTestCase {
+
+    private ThreadPool threadPool;
+
+    @Before
+    public void setup() {
+        threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+        threadPool.shutdownNow();
+    }
+
+    public void testAllConditionsMetOnStart() {
+        // Should never start if not master node, even if all other conditions have been met
+        final XPackLicenseState licenseState = getAlwaysValidLicense();
+        ClusterService clusterService = createClusterService(true, false);
+        TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, true, false);
+        EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener(
+            client,
+            clusterService,
+            threadPool,
+            licenseState
+        );
+        listener.init();
+        listener.licenseStateChanged();
+        listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, true), clusterService.state()));
+        client.assertTaskStartHasBeenCalled();
+    }
+
+    public void testLicenseChanges() {
+        final TestUtils.UpdatableLicenseState licenseState = new TestUtils.UpdatableLicenseState();
+        licenseState.update(new XPackLicenseStatus(License.OperationMode.TRIAL, false, ""));
+        ClusterService clusterService = createClusterService(true, true);
+        TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, false, true);
+        EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener(
+            client,
+            clusterService,
+            threadPool,
+            licenseState
+        );
+        listener.init();
+        listener.licenseStateChanged();
+        listener.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), clusterService.state()));
+        client.expectStartTask = true;
+        client.expectRemoveTask = false;
+        licenseState.update(new XPackLicenseStatus(License.OperationMode.TRIAL, true, ""));
+        listener.licenseStateChanged();
+        client.assertTaskStartHasBeenCalled();
+        client.expectStartTask = false;
+        client.expectRemoveTask = true;
+        licenseState.update(new XPackLicenseStatus(License.OperationMode.TRIAL, false, ""));
+        listener.licenseStateChanged();
+        client.assertTaskRemoveHasBeenCalled();
+    }
+
+    public void testDatabaseChanges() {
+        final XPackLicenseState licenseState = getAlwaysValidLicense();
+        ClusterService clusterService = createClusterService(true, false);
+        TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, false, false);
+        EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener(
+            client,
+            clusterService,
+            threadPool,
+            licenseState
+        );
+        listener.init();
+        listener.licenseStateChanged();
+        listener.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), clusterService.state()));
+        // add a geoip database, so the task ought to be started:
+        client.expectStartTask = true;
+        listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, true), clusterService.state()));
+        client.assertTaskStartHasBeenCalled();
+        // Now we remove the geoip databases. The task ought to just be left alone.
+        client.expectStartTask = false;
+        client.expectRemoveTask = false;
+        listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, false), clusterService.state()));
+    }
+
+    public void testMasterChanges() {
+        // Should never start if not master node, even if all other conditions have been met
+        final XPackLicenseState licenseState = getAlwaysValidLicense();
+        ClusterService clusterService = createClusterService(false, false);
+        TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, false, false);
+        EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener(
+            client,
+            clusterService,
+            threadPool,
+            licenseState
+        );
+        listener.init();
+        listener.licenseStateChanged();
+        listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(false, true), clusterService.state()));
+        client.expectStartTask = true;
+        listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, true), clusterService.state()));
+    }
+
+    private XPackLicenseState getAlwaysValidLicense() {
+        return new XPackLicenseState(() -> 0);
+    }
+
+    private ClusterService createClusterService(boolean isMasterNode, boolean hasGeoIpDatabases) {
+        ClusterService clusterService = mock(ClusterService.class);
+        ClusterState state = createClusterState(isMasterNode, hasGeoIpDatabases);
+        when(clusterService.state()).thenReturn(state);
+        return clusterService;
+    }
+
+    private ClusterState createClusterState(boolean isMasterNode, boolean hasGeoIpDatabases) {
+        String indexName = randomAlphaOfLength(5);
+        Index index = new Index(indexName, UUID.randomUUID().toString());
+        IndexMetadata.Builder idxMeta = IndexMetadata.builder(index.getName())
+            .settings(indexSettings(IndexVersion.current(), 1, 0).put("index.uuid", index.getUUID()));
+        String nodeId = ESTestCase.randomAlphaOfLength(8);
+        DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(DiscoveryNodeUtils.create(nodeId)).localNodeId(nodeId);
+        if (isMasterNode) {
+            discoveryNodesBuilder.masterNodeId(nodeId);
+        }
+        ClusterState.Builder clusterStateBuilder = ClusterState.builder(new ClusterName("name"));
+        if (hasGeoIpDatabases) {
+            PersistentTasksCustomMetadata tasksCustomMetadata = new PersistentTasksCustomMetadata(1L, Map.of());
+            clusterStateBuilder.metadata(Metadata.builder().putCustom(INGEST_GEOIP_CUSTOM_METADATA_TYPE, tasksCustomMetadata).put(idxMeta));
+        }
+        return clusterStateBuilder.nodes(discoveryNodesBuilder).build();
+    }
+
+    private static class TaskStartAndRemoveMockClient extends NoOpClient {
+
+        boolean expectStartTask;
+        boolean expectRemoveTask;
+        private boolean taskStartCalled = false;
+        private boolean taskRemoveCalled = false;
+
+        private TaskStartAndRemoveMockClient(ThreadPool threadPool, boolean expectStartTask, boolean expectRemoveTask) {
+            super(threadPool);
+            this.expectStartTask = expectStartTask;
+            this.expectRemoveTask = expectRemoveTask;
+        }
+
+        @Override
+        protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
+            ActionType<Response> action,
+            Request request,
+            ActionListener<Response> listener
+        ) {
+            if (action.equals(StartPersistentTaskAction.INSTANCE)) {
+                if (expectStartTask) {
+                    taskStartCalled = true;
+                } else {
+                    fail("Should not start task");
+                }
+            } else if (action.equals(RemovePersistentTaskAction.INSTANCE)) {
+                if (expectRemoveTask) {
+                    taskRemoveCalled = true;
+                } else {
+                    fail("Should not remove task");
+                }
+            } else {
+                throw new IllegalStateException("unexpected action called [" + action.name() + "]");
+            }
+        }
+
+        void assertTaskStartHasBeenCalled() {
+            assertTrue(taskStartCalled);
+        }
+
+        void assertTaskRemoveHasBeenCalled() {
+            assertTrue(taskRemoveCalled);
+        }
+    }
+}

+ 3 - 0
x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java

@@ -39,6 +39,9 @@ public class Constants {
         "cluster:admin/indices/dangling/find",
         "cluster:admin/indices/dangling/import",
         "cluster:admin/indices/dangling/list",
+        "cluster:admin/ingest/geoip/database/delete",
+        "cluster:admin/ingest/geoip/database/get",
+        "cluster:admin/ingest/geoip/database/put",
         "cluster:admin/ingest/pipeline/delete",
         "cluster:admin/ingest/pipeline/get",
         "cluster:admin/ingest/pipeline/put",