Browse Source

Deprecation of transient cluster settings (#78794)

This PR replaces uses of transient cluster settings with
persistent cluster settings.

The PR also deprecates the transient settings usage.

Relates to #49540
Nikola Grcevski 4 years ago
parent
commit
055c770083
26 changed files with 264 additions and 107 deletions
  1. 3 2
      client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
  2. 4 2
      client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
  3. 37 20
      client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
  4. 14 10
      docs/reference/cluster/update-settings.asciidoc
  5. 2 2
      docs/reference/data-streams/change-mappings-and-settings.asciidoc
  6. 6 6
      docs/reference/how-to/fix-common-cluster-issues.asciidoc
  7. 2 2
      docs/reference/how-to/size-your-shards.asciidoc
  8. 34 34
      docs/reference/ilm/ilm-with-existing-indices.asciidoc
  9. 1 1
      docs/reference/ingest/processors/grok.asciidoc
  10. 2 2
      docs/reference/modules/cluster/allocation_filtering.asciidoc
  11. 1 1
      docs/reference/modules/cluster/disk_allocator.asciidoc
  12. 1 1
      docs/reference/modules/cluster/misc.asciidoc
  13. 4 4
      docs/reference/modules/network/tracers.asciidoc
  14. 3 0
      docs/reference/setup/configuration.asciidoc
  15. 12 12
      docs/reference/setup/important-settings/path-settings.asciidoc
  16. 1 1
      docs/reference/setup/logging-config.asciidoc
  17. 1 0
      rest-api-spec/build.gradle
  18. 9 0
      rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml
  19. 21 0
      server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java
  20. 14 1
      server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java
  21. 11 0
      test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
  22. 2 2
      x-pack/docs/en/security/troubleshooting.asciidoc
  23. 25 0
      x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java
  24. 3 2
      x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java
  25. 49 0
      x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java
  26. 2 2
      x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java

+ 3 - 2
client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java

@@ -74,9 +74,10 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
         ClusterUpdateSettingsRequest setRequest = new ClusterUpdateSettingsRequest();
         setRequest.transientSettings(transientSettings);
         setRequest.persistentSettings(map);
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
 
         ClusterUpdateSettingsResponse setResponse = execute(setRequest, highLevelClient().cluster()::putSettings,
-            highLevelClient().cluster()::putSettingsAsync);
+            highLevelClient().cluster()::putSettingsAsync, options);
 
         assertAcked(setResponse);
         assertThat(setResponse.getTransientSettings().get(transientSettingKey), notNullValue());
@@ -98,7 +99,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
         resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON);
 
         ClusterUpdateSettingsResponse resetResponse = execute(resetRequest, highLevelClient().cluster()::putSettings,
-            highLevelClient().cluster()::putSettingsAsync);
+            highLevelClient().cluster()::putSettingsAsync, options);
 
         assertThat(resetResponse.getTransientSettings().get(transientSettingKey), equalTo(null));
         assertThat(resetResponse.getPersistentSettings().get(persistentSettingKey), equalTo(null));

+ 4 - 2
client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java

@@ -240,8 +240,9 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
         ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
         request.persistentSettings(persistentSettings);
         request.transientSettings(transientSettings);
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
         assertTrue(execute(
-            request, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync).isAcknowledged());
+            request, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync, options).isAcknowledged());
     }
 
     protected void putConflictPipeline() throws IOException {
@@ -317,8 +318,9 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
 
         ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
         updateSettingsRequest.persistentSettings(singletonMap("cluster.remote." + remoteClusterName + ".seeds", transportAddress));
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
         ClusterUpdateSettingsResponse updateSettingsResponse =
-                restHighLevelClient.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT);
+                restHighLevelClient.cluster().putSettings(updateSettingsRequest, options);
         assertThat(updateSettingsResponse.isAcknowledged(), is(true));
 
         assertBusy(() -> {

+ 37 - 20
client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java

@@ -21,6 +21,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.WarningsHandler;
 import org.elasticsearch.client.cluster.RemoteConnectionInfo;
 import org.elasticsearch.client.cluster.RemoteInfoRequest;
 import org.elasticsearch.client.cluster.RemoteInfoResponse;
@@ -35,6 +36,7 @@ import org.elasticsearch.cluster.health.ClusterShardHealth;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
 import org.elasticsearch.cluster.metadata.ComponentTemplate;
 import org.elasticsearch.cluster.metadata.Template;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
@@ -70,38 +72,48 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
         // end::put-settings-request
 
         // tag::put-settings-create-settings
-        String persistentSettingKey =
+        String transientSettingKey =
                 RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey();
-        int persistentSettingValue = 10;
-        Settings persistentSettings =
+        int transientSettingValue = 10;
+        Settings transientSettings =
                 Settings.builder()
-                .put(persistentSettingKey, persistentSettingValue, ByteSizeUnit.BYTES)
+                .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES)
                 .build(); // <1>
+
+        String persistentSettingKey =
+                EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey();
+        String persistentSettingValue =
+                EnableAllocationDecider.Allocation.NONE.name();
+        Settings persistentSettings =
+                Settings.builder()
+                .put(persistentSettingKey, persistentSettingValue)
+                .build(); // <2>
         // end::put-settings-create-settings
 
         // tag::put-settings-request-cluster-settings
-        request.persistentSettings(persistentSettings); // <1>
+        request.transientSettings(transientSettings); // <1>
+        request.persistentSettings(persistentSettings); // <2>
         // end::put-settings-request-cluster-settings
 
         {
             // tag::put-settings-settings-builder
-            Settings.Builder persistentSettingsBuilder =
+            Settings.Builder transientSettingsBuilder =
                     Settings.builder()
-                    .put(persistentSettingKey, persistentSettingValue, ByteSizeUnit.BYTES);
-            request.persistentSettings(persistentSettingsBuilder); // <1>
+                    .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES);
+            request.transientSettings(transientSettingsBuilder); // <1>
             // end::put-settings-settings-builder
         }
         {
             // tag::put-settings-settings-map
             Map<String, Object> map = new HashMap<>();
-            map.put(persistentSettingKey
-                    , persistentSettingValue + ByteSizeUnit.BYTES.getSuffix());
-            request.persistentSettings(map); // <1>
+            map.put(transientSettingKey
+                    , transientSettingValue + ByteSizeUnit.BYTES.getSuffix());
+            request.transientSettings(map); // <1>
             // end::put-settings-settings-map
         }
         {
             // tag::put-settings-settings-source
-            request.persistentSettings(
+            request.transientSettings(
                     "{\"indices.recovery.max_bytes_per_sec\": \"10b\"}"
                     , XContentType.JSON); // <1>
             // end::put-settings-settings-source
@@ -116,21 +128,26 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
         request.masterNodeTimeout("1m"); // <2>
         // end::put-settings-request-masterTimeout
 
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
         // tag::put-settings-execute
-        ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, RequestOptions.DEFAULT);
+        ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, options);
         // end::put-settings-execute
 
         // tag::put-settings-response
         boolean acknowledged = response.isAcknowledged(); // <1>
-        Settings persistentSettingsResponse = response.getPersistentSettings(); // <2>
+        Settings transientSettingsResponse = response.getTransientSettings(); // <2>
+        Settings persistentSettingsResponse = response.getPersistentSettings(); // <3>
         // end::put-settings-response
         assertTrue(acknowledged);
-        assertThat(persistentSettingsResponse.get(persistentSettingKey), equalTo(persistentSettingValue + ByteSizeUnit.BYTES.getSuffix()));
-
-        // tag::put-settings-request-reset-persistent
-        request.persistentSettings(Settings.builder().putNull(persistentSettingKey).build()); // <1>
-        // tag::put-settings-request-reset-persistent
-        ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request, RequestOptions.DEFAULT);
+        assertThat(transientSettingsResponse.get(transientSettingKey),
+            equalTo(transientSettingValue + ByteSizeUnit.BYTES.getSuffix()));
+        assertThat(persistentSettingsResponse.get(persistentSettingKey), equalTo(persistentSettingValue));
+
+        // tag::put-settings-request-reset-transient
+        request.transientSettings(Settings.builder().putNull(transientSettingKey).build()); // <1>
+        // end::put-settings-request-reset-transient
+        request.persistentSettings(Settings.builder().putNull(persistentSettingKey));
+        ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request, options);
 
         assertTrue(resetResponse.isAcknowledged());
     }

+ 14 - 10
docs/reference/cluster/update-settings.asciidoc

@@ -4,7 +4,7 @@
 <titleabbrev>Cluster update settings</titleabbrev>
 ++++
 
-Updates cluster-wide settings. 
+Updates cluster-wide settings.
 
 
 [[cluster-update-settings-api-request]]
@@ -21,21 +21,21 @@ Updates cluster-wide settings.
 [[cluster-update-settings-api-desc]]
 ==== {api-description-title}
 
-With specifications in the request body, this API call can update cluster 
-settings. Updates to settings can be persistent, meaning they apply across 
+With specifications in the request body, this API call can update cluster
+settings. Updates to settings can be persistent, meaning they apply across
 restarts, or transient, where they don't survive a full cluster restart.
 
-You can reset persistent or transient settings by assigning a `null` value. If a 
-transient setting is reset, the first one of these values that is defined is 
+You can reset persistent or transient settings by assigning a `null` value. If a
+transient setting is reset, the first one of these values that is defined is
 applied:
 
 * the persistent setting
 * the setting in the configuration file
-* the default value. 
+* the default value.
 
 The order of precedence for cluster settings is:
 
-1. transient cluster settings 
+1. transient cluster settings
 2. persistent cluster settings
 3. settings in the `elasticsearch.yml` configuration file.
 
@@ -45,6 +45,8 @@ the setting is the same on all nodes. If, on the other hand, you define differen
 settings on different nodes by accident using the configuration file, it is very
 difficult to notice these discrepancies.
 
+NOTE: Transient settings are deprecated and will be removed in a future release.
+Prefer using persistent cluster settings instead.
 
 [[cluster-update-settings-api-query-params]]
 ==== {api-query-parms-title}
@@ -52,7 +54,7 @@ difficult to notice these discrepancies.
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=flat-settings]
 
 `include_defaults`::
-    (Optional, Boolean) If `true`, returns all default cluster settings. 
+    (Optional, Boolean) If `true`, returns all default cluster settings.
     Defaults to `false`.
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
@@ -85,9 +87,9 @@ PUT /_cluster/settings?flat_settings=true
   }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
 
-
-The response to an update returns the changed setting, as in this response to 
+The response to an update returns the changed setting, as in this response to
 the transient example:
 
 [source,console-result]
@@ -114,6 +116,7 @@ PUT /_cluster/settings
   }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
 
 
 The response does not include settings that have been reset:
@@ -141,3 +144,4 @@ PUT /_cluster/settings
   }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]

+ 2 - 2
docs/reference/data-streams/change-mappings-and-settings.asciidoc

@@ -521,7 +521,7 @@ lowers the `indices.lifecycle.poll_interval` setting to `1m` (one minute).
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": "1m"
   }
 }
@@ -658,7 +658,7 @@ The following update cluster settings API request resets the
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": null
   }
 }

+ 6 - 6
docs/reference/how-to/fix-common-cluster-issues.asciidoc

@@ -48,8 +48,8 @@ watermarks and remove the write block.
 [source,console]
 ----
 PUT _cluster/settings
-{ 
-  "transient": {
+{
+  "persistent": {
     "cluster.routing.allocation.disk.watermark.low": "90%",
     "cluster.routing.allocation.disk.watermark.high": "95%",
     "cluster.routing.allocation.disk.watermark.flood_stage": "97%"
@@ -57,7 +57,7 @@ PUT _cluster/settings
 }
 
 PUT */_settings?expand_wildcards=all
-{ 
+{
   "index.blocks.read_only_allow_delete": null
 }
 ----
@@ -79,8 +79,8 @@ When a long-term solution is in place, reset or reconfigure the disk watermarks.
 [source,console]
 ----
 PUT _cluster/settings
-{ 
-  "transient": {
+{
+  "persistent": {
     "cluster.routing.allocation.disk.watermark.low": null,
     "cluster.routing.allocation.disk.watermark.high": null,
     "cluster.routing.allocation.disk.watermark.flood_stage": null
@@ -208,7 +208,7 @@ include::{es-repo-dir}/tab-widgets/cpu-usage-widget.asciidoc[]
 **Check hot threads**
 
 If a node has high CPU usage, use the <<cluster-nodes-hot-threads,nodes hot
-threads API>> to check for resource-intensive threads running on the node. 
+threads API>> to check for resource-intensive threads running on the node.
 
 [source,console]
 ----

+ 2 - 2
docs/reference/how-to/size-your-shards.asciidoc

@@ -329,7 +329,7 @@ cluster settings API>> and retry the action.
 ----
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "cluster.max_shards_per_node": 1200
   }
 }
@@ -353,7 +353,7 @@ When a long-term solution is in place, we recommend you reset the
 ----
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "cluster.max_shards_per_node": null
   }
 }

+ 34 - 34
docs/reference/ilm/ilm-with-existing-indices.asciidoc

@@ -6,51 +6,51 @@
 If you've been using Curator or some other mechanism to manage periodic indices,
 you have a couple options when migrating to {ilm-init}:
 
-* Set up your index templates to use an {ilm-init} policy to manage your new indices. 
+* Set up your index templates to use an {ilm-init} policy to manage your new indices.
 Once {ilm-init} is managing your current write index, you can apply an appropriate policy to your old indices.
 
-* Reindex into an {ilm-init}-managed index. 
+* Reindex into an {ilm-init}-managed index.
 
 NOTE: Starting in Curator version 5.7, Curator ignores {ilm-init} managed indices.
 
 [discrete]
 [[ilm-existing-indices-apply]]
-=== Apply policies to existing time series indices 
+=== Apply policies to existing time series indices
 
 The simplest way to transition to managing your periodic indices with {ilm-init} is
-to <<apply-policy-template, configure an index template>> to apply a lifecycle policy to new indices. 
-Once the index you are writing to is being managed by {ilm-init}, 
+to <<apply-policy-template, configure an index template>> to apply a lifecycle policy to new indices.
+Once the index you are writing to is being managed by {ilm-init},
 you can <<apply-policy-multiple, manually apply a policy>> to your older indices.
 
-Define a separate policy for your older indices that omits the rollover action. 
-Rollover is used to manage where new data goes, so isn't applicable. 
+Define a separate policy for your older indices that omits the rollover action.
+Rollover is used to manage where new data goes, so isn't applicable.
 
-Keep in mind that policies applied to existing indices compare the `min_age` for each phase to 
+Keep in mind that policies applied to existing indices compare the `min_age` for each phase to
 the original creation date of the index, and might proceed through multiple phases immediately.
-If your policy performs resource-intensive operations like force merge, 
+If your policy performs resource-intensive operations like force merge,
 you don't want to have a lot of indices performing those operations all at once
-when you switch over to {ilm-init}. 
+when you switch over to {ilm-init}.
 
-You can specify different `min_age` values in the policy you use for existing indices, 
-or set <<index-lifecycle-origination-date, `index.lifecycle.origination_date`>> 
-to control how the index age is calculated. 
+You can specify different `min_age` values in the policy you use for existing indices,
+or set <<index-lifecycle-origination-date, `index.lifecycle.origination_date`>>
+to control how the index age is calculated.
 
-Once all pre-{ilm-init} indices have been aged out and removed, 
+Once all pre-{ilm-init} indices have been aged out and removed,
 you can delete the policy you used to manage them.
 
 NOTE: If you are using {beats} or {ls}, enabling {ilm-init} in version 7.0 and onward
-sets up {ilm-init} to manage new indices automatically. 
-If you are using {beats} through {ls}, 
-you might need to change your {ls} output configuration and invoke the {beats} setup 
+sets up {ilm-init} to manage new indices automatically.
+If you are using {beats} through {ls},
+you might need to change your {ls} output configuration and invoke the {beats} setup
 to use {ilm-init} for new data.
 
 [discrete]
 [[ilm-existing-indices-reindex]]
 === Reindex into a managed index
 
-An alternative to <<ilm-with-existing-periodic-indices,applying policies to existing indices>> is to 
+An alternative to <<ilm-with-existing-periodic-indices,applying policies to existing indices>> is to
 reindex your data into an {ilm-init}-managed index.
-You might want to do this if creating periodic indices with very small amounts of data  
+You might want to do this if creating periodic indices with very small amounts of data
 has led to excessive shard counts, or if continually indexing into the same index has led to large shards
 and performance issues.
 
@@ -58,24 +58,24 @@ First, you need to set up the new {ilm-init}-managed index:
 
 . Update your index template to include the necessary {ilm-init} settings.
 . Bootstrap an initial index as the write index.
-. Stop writing to the old indices and index new documents using the alias that points to bootstrapped index. 
+. Stop writing to the old indices and index new documents using the alias that points to bootstrapped index.
 
 To reindex into the managed index:
 
 . Pause indexing new documents if you do not want to mix new and old data in the {ilm-init}-managed index.
-Mixing old and new data in one index is safe, 
+Mixing old and new data in one index is safe,
 but a combined index needs to be retained until you are ready to delete the new data.
 
-. Reduce the {ilm-init} poll interval to ensure that the index doesn't 
+. Reduce the {ilm-init} poll interval to ensure that the index doesn't
 grow too large while waiting for the rollover check.
-By default, {ilm-init} checks to see what actions need to be taken every 10 minutes. 
+By default, {ilm-init} checks to see what actions need to be taken every 10 minutes.
 +
 --
 [source,console]
 -----------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": "1m" <1>
   }
 }
@@ -84,13 +84,13 @@ PUT _cluster/settings
 <1> Check once a minute to see if {ilm-init} actions such as rollover need to be performed.
 --
 
-. Reindex your data using the <<docs-reindex,reindex API>>. 
-If you want to partition the data in the order in which it was originally indexed, 
-you can run separate reindex requests. 
+. Reindex your data using the <<docs-reindex,reindex API>>.
+If you want to partition the data in the order in which it was originally indexed,
+you can run separate reindex requests.
 +
 --
-IMPORTANT: Documents retain their original IDs. If you don't use automatically generated document IDs, 
-and are reindexing from multiple source indices, you might need to do additional processing to 
+IMPORTANT: Documents retain their original IDs. If you don't use automatically generated document IDs,
+and are reindexing from multiple source indices, you might need to do additional processing to
 ensure that document IDs don't conflict. One way to do this is to use a
 <<reindex-scripts,script>> in the reindex call to append the original index name
 to the document ID.
@@ -174,19 +174,19 @@ POST _reindex
 <1> Matches your existing indices. Using the prefix for
     the new indices makes using this index pattern much easier.
 <2> The alias that points to your bootstrapped index.
-<3> Halts reindexing if multiple documents have the same ID. 
-    This is recommended to prevent accidentally overwriting documents 
+<3> Halts reindexing if multiple documents have the same ID.
+    This is recommended to prevent accidentally overwriting documents
     if documents in different source indices have the same ID.
 --
 
-. When reindexing is complete, set the {ilm-init} poll interval back to its default value to 
+. When reindexing is complete, set the {ilm-init} poll interval back to its default value to
 prevent unnecessary load on the master node:
 +
 [source,console]
 -----------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": null
   }
 }
@@ -198,5 +198,5 @@ PUT _cluster/settings
 +
 Querying using this alias will now search your new data and all of the reindexed data.
 
-. Once you have verified that all of the reindexed data is available in the new managed indices, 
+. Once you have verified that all of the reindexed data is available in the new managed indices,
 you can safely remove the old indices.

+ 1 - 1
docs/reference/ingest/processors/grok.asciidoc

@@ -338,7 +338,7 @@ server log.
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.ingest.common.GrokProcessor": "debug"
   }
 }

+ 2 - 2
docs/reference/modules/cluster/allocation_filtering.asciidoc

@@ -22,7 +22,7 @@ it down, you could create a filter that excludes the node by its IP address:
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
   }
 }
@@ -70,7 +70,7 @@ You can use wildcards when specifying attribute values, for example:
 ------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.exclude._ip": "192.168.2.*"
   }
 }

+ 1 - 1
docs/reference/modules/cluster/disk_allocator.asciidoc

@@ -145,7 +145,7 @@ gigabytes free, and updating the information about the cluster every minute:
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.disk.watermark.low": "100gb",
     "cluster.routing.allocation.disk.watermark.high": "50gb",
     "cluster.routing.allocation.disk.watermark.flood_stage": "10gb",

+ 1 - 1
docs/reference/modules/cluster/misc.asciidoc

@@ -159,7 +159,7 @@ The settings which control logging can be updated <<dynamic-cluster-setting,dyna
 -------------------------------
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.indices.recovery": "DEBUG"
   }
 }

+ 4 - 4
docs/reference/modules/network/tracers.asciidoc

@@ -16,7 +16,7 @@ the `org.elasticsearch.http.HttpTracer` logger to `TRACE`:
 --------------------------------------------------
 PUT _cluster/settings
 {
-   "transient" : {
+   "persistent" : {
       "logger.org.elasticsearch.http.HttpTracer" : "TRACE"
    }
 }
@@ -29,7 +29,7 @@ exclude wildcard patterns. By default every request will be traced.
 --------------------------------------------------
 PUT _cluster/settings
 {
-   "transient" : {
+   "persistent" : {
       "http.tracer.include" : "*",
       "http.tracer.exclude" : ""
    }
@@ -47,7 +47,7 @@ requests and responses. Activate the tracer by setting the level of the
 --------------------------------------------------
 PUT _cluster/settings
 {
-   "transient" : {
+   "persistent" : {
       "logger.org.elasticsearch.transport.TransportService.tracer" : "TRACE"
    }
 }
@@ -61,7 +61,7 @@ fault detection pings:
 --------------------------------------------------
 PUT _cluster/settings
 {
-   "transient" : {
+   "persistent" : {
       "transport.tracer.include" : "*",
       "transport.tracer.exclude" : "internal:coordination/fault_detection/*"
    }

+ 3 - 0
docs/reference/setup/configuration.asciidoc

@@ -140,6 +140,9 @@ settings API and use `elasticsearch.yml` only for local configurations. Using
 the cluster update settings API ensures the setting is the same on all nodes. If
 you accidentally configure different settings in `elasticsearch.yml` on
 different nodes, it can be difficult to notice discrepancies.
+
+NOTE: Transient settings are deprecated and will be removed in a future release.
+Prefer using persistent cluster settings instead.
 --
 
 [[static-cluster-setting]]

+ 12 - 12
docs/reference/setup/important-settings/path-settings.asciidoc

@@ -57,9 +57,9 @@ Storage Spaces on Windows. If you wish to use multiple data paths on a single
 machine then you must run one node for each data path.
 
 If you currently use multiple data paths in a
-{ref}/high-availability-cluster-design.html[highly available cluster] then you 
-can migrate to a setup that uses a single path for each node without downtime 
-using a process similar to a 
+{ref}/high-availability-cluster-design.html[highly available cluster] then you
+can migrate to a setup that uses a single path for each node without downtime
+using a process similar to a
 {ref}/restart-cluster.html#restart-cluster-rolling[rolling restart]: shut each
 node down in turn and replace it with one or more nodes each configured to use
 a single data path. In more detail, for each node that currently has multiple
@@ -76,18 +76,18 @@ migrating to a single-data-path setup before starting to upgrade.
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.exclude._name": "target-node-name"
   }
 }
 --------------------------------------------------
 +
-You can use the {ref}/cat-allocation.html[cat allocation API] to track progress 
+You can use the {ref}/cat-allocation.html[cat allocation API] to track progress
 of this data migration. If some shards do not migrate then the
-{ref}/cluster-allocation-explain.html[cluster allocation explain API] will help 
+{ref}/cluster-allocation-explain.html[cluster allocation explain API] will help
 you to determine why.
 
-3. Follow the steps in the 
+3. Follow the steps in the
 {ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process]
 up to and including shutting the target node down.
 
@@ -100,7 +100,7 @@ of every shard assigned to at least one of the other nodes in your cluster.
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.exclude._name": null
   }
 }
@@ -118,7 +118,7 @@ has sufficient space for the data that it will hold.
 `path.data` setting pointing at a separate data path.
 
 9. Start the new nodes and follow the rest of the
-{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process] for 
+{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process] for
 them.
 
 10. Ensure your cluster health is `green`, so that every shard has been
@@ -126,9 +126,9 @@ assigned.
 
 You can alternatively add some number of single-data-path nodes to your
 cluster, migrate all your data over to these new nodes using
-{ref}/modules-cluster.html#cluster-shard-allocation-filtering[allocation filters], 
-and then remove the old nodes from the cluster. This approach will temporarily 
-double the size of your cluster so it will only work if you have the capacity to 
+{ref}/modules-cluster.html#cluster-shard-allocation-filtering[allocation filters],
+and then remove the old nodes from the cluster. This approach will temporarily
+double the size of your cluster so it will only work if you have the capacity to
 expand your cluster like this.
 
 If you currently use multiple data paths but your cluster is not highly

+ 1 - 1
docs/reference/setup/logging-config.asciidoc

@@ -155,7 +155,7 @@ only intended for expert use.
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.discovery": "DEBUG"
   }
 }

+ 1 - 0
rest-api-spec/build.gradle

@@ -84,6 +84,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task ->
 
   task.replaceValueInMatch("_type", "_doc")
   task.addAllowedWarningRegex("\\[types removal\\].*")
+  task.addAllowedWarning("[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.")
   task.replaceValueInMatch("nodes.\$node_id.roles.8", "ml", "node_info role test")
   task.replaceValueInMatch("nodes.\$node_id.roles.9", "remote_cluster_client", "node_info role test")
   task.removeMatch("nodes.\$node_id.roles.10", "node_info role test")

+ 9 - 0
rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml

@@ -1,6 +1,13 @@
 ---
 "Test put and reset transient settings":
+  - skip:
+      version: " - 7.99.99"
+      reason:  "transient settings deprecation"
+      features: "warnings"
+
   - do:
+      warnings:
+        - "[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead."
       cluster.put_settings:
         body:
           transient:
@@ -16,6 +23,8 @@
   - match: {transient: {cluster.routing.allocation.enable: "none"}}
 
   - do:
+      warnings:
+        - "[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead."
       cluster.put_settings:
         body:
           transient:

+ 21 - 0
server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java

@@ -64,6 +64,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
         return validationException;
     }
 
+    /**
+     * Returns the transient settings carried by this request.
+     *
+     * @return the transient settings to be applied (empty if none were set)
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
+     */
+    @Deprecated
     public Settings transientSettings() {
         return transientSettings;
     }
@@ -74,7 +79,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(Settings settings) {
         this.transientSettings = settings;
         return this;
@@ -82,7 +91,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(Settings.Builder settings) {
         this.transientSettings = settings.build();
         return this;
@@ -90,7 +103,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(String source, XContentType xContentType) {
         this.transientSettings = Settings.builder().loadFromSource(source, xContentType).build();
         return this;
@@ -98,7 +115,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(Map<String, ?> source) {
         this.transientSettings = Settings.builder().loadFromMap(source).build();
         return this;

+ 14 - 1
server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java

@@ -11,6 +11,8 @@ package org.elasticsearch.rest.action.admin.cluster;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.logging.DeprecationCategory;
+import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -25,6 +27,9 @@ import java.util.Set;
 import static org.elasticsearch.rest.RestRequest.Method.PUT;
 
 public class RestClusterUpdateSettingsAction extends BaseRestHandler {
+    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterUpdateSettingsAction.class);
+    static final String TRANSIENT_SETTINGS_DEPRECATION_MESSAGE = "[transient settings removal]" +
+        " Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.";
 
     private static final String PERSISTENT = "persistent";
     private static final String TRANSIENT = "transient";
@@ -51,7 +56,15 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler {
             source = parser.map();
         }
         if (source.containsKey(TRANSIENT)) {
-            clusterUpdateSettingsRequest.transientSettings((Map<String, ?>) source.get(TRANSIENT));
+            Map<String, ?> transientSettings = (Map<String, ?>) source.get(TRANSIENT);
+
+            // We check for empty transient settings map because ClusterUpdateSettingsRequest initializes
+            // each of the settings to an empty collection. When the RestClient is used, we'll get an empty
+            // transient settings map, even if we never set any transient settings.
+            if (transientSettings.isEmpty() == false) {
+                deprecationLogger.warn(DeprecationCategory.SETTINGS, "transient_settings", TRANSIENT_SETTINGS_DEPRECATION_MESSAGE);
+            }
+            clusterUpdateSettingsRequest.transientSettings(transientSettings);
         }
         if (source.containsKey(PERSISTENT)) {
             clusterUpdateSettingsRequest.persistentSettings((Map<String, ?>) source.get(PERSISTENT));

+ 11 - 0
test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java

@@ -920,6 +920,17 @@ public abstract class ESRestTestCase extends ESTestCase {
 
         if (mustClear) {
             Request request = new Request("PUT", "/_cluster/settings");
+
+            request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> {
+                if (warnings.isEmpty()) {
+                    return false;
+                } else if (warnings.size() > 1) {
+                    return true;
+                } else {
+                    return warnings.get(0).startsWith("[transient settings removal]") == false;
+                }
+            }));
+
             request.setJsonEntity(Strings.toString(clearCommand));
             adminClient().performRequest(request);
         }

+ 2 - 2
x-pack/docs/en/security/troubleshooting.asciidoc

@@ -700,13 +700,13 @@ the `basic` `authProvider` in {kib}. The process is documented in the
 
 If the previous resolutions do not solve your issue, enable additional
 logging for the SAML realm to troubleshoot further. You can enable debug
-logging by configuring the following transient setting:
+logging by configuring the following persistent setting:
 
 [source, console]
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.xpack.security.authc.saml": "debug"
   }
 }

+ 25 - 0
x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java

@@ -0,0 +1,25 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.deprecation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;
+
+/**
+ * Cluster-wide deprecation checks consumed by the deprecation info API
+ * (wired in via {@code DeprecationChecks.CLUSTER_SETTINGS_CHECKS}).
+ */
+public class ClusterDeprecationChecks {
+    /**
+     * Flags the presence of any transient cluster settings, which are being
+     * deprecated in favor of persistent settings.
+     *
+     * @param state the current cluster state to inspect
+     * @return a WARNING-level {@link DeprecationIssue} if the cluster metadata
+     *         contains any transient settings, or {@code null} if there are none
+     */
+    static DeprecationIssue checkTransientSettingsExistence(ClusterState state) {
+        if (state.metadata().transientSettings().isEmpty() == false) {
+            return new DeprecationIssue(DeprecationIssue.Level.WARNING,
+                "Transient cluster settings are in the process of being removed.",
+                "https://ela.st/es-deprecation-7-transient-cluster-settings",
+                "Use persistent settings to define your cluster settings instead.",
+                false,
+                null);
+        }
+        return null;
+    }
+}

+ 3 - 2
x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java

@@ -38,8 +38,9 @@ public class DeprecationChecks {
     private DeprecationChecks() {
     }
 
-    static List<Function<ClusterState, DeprecationIssue>> CLUSTER_SETTINGS_CHECKS =
-        Collections.emptyList();
+    static List<Function<ClusterState, DeprecationIssue>> CLUSTER_SETTINGS_CHECKS = List.of(
+        ClusterDeprecationChecks::checkTransientSettingsExistence
+    );
 
     static List<BiFunction<Settings, PluginsAndModules, DeprecationIssue>> NODE_SETTINGS_CHECKS = List.of(
         NodeDeprecationChecks::checkSharedDataPathSetting,

+ 49 - 0
x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java

@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+package org.elasticsearch.xpack.deprecation;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Unit tests for {@link ClusterDeprecationChecks}.
+ */
+public class ClusterDeprecationChecksTests extends ESTestCase {
+
+    public void testCheckTransientSettingsExistence() {
+        // A cluster state whose metadata carries transient settings must
+        // produce the WARNING-level deprecation issue.
+        Settings transientSettings = Settings.builder()
+            .put("indices.recovery.max_bytes_per_sec", "20mb")
+            .build();
+        Metadata metadataWithTransientSettings = Metadata.builder()
+            .transientSettings(transientSettings)
+            .build();
+
+        ClusterState badState = ClusterState.builder(new ClusterName("test")).metadata(metadataWithTransientSettings).build();
+        DeprecationIssue issue = ClusterDeprecationChecks.checkTransientSettingsExistence(badState);
+        assertThat(issue, equalTo(
+            new DeprecationIssue(DeprecationIssue.Level.WARNING,
+                "Transient cluster settings are in the process of being removed.",
+                "https://ela.st/es-deprecation-7-transient-cluster-settings",
+                "Use persistent settings to define your cluster settings instead.",
+                false, null)
+        ));
+
+        // The same setting stored persistently must not be flagged: only the
+        // transient storage mechanism is deprecated, not the setting itself.
+        Settings persistentSettings = Settings.builder()
+            .put("indices.recovery.max_bytes_per_sec", "20mb")
+            .build();
+        Metadata metadataWithoutTransientSettings = Metadata.builder()
+            .persistentSettings(persistentSettings)
+            .build();
+
+        ClusterState okState = ClusterState.builder(new ClusterName("test")).metadata(metadataWithoutTransientSettings).build();
+        issue = ClusterDeprecationChecks.checkTransientSettingsExistence(okState);
+        assertNull(issue);
+    }
+}

+ 2 - 2
x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java

@@ -115,7 +115,7 @@ public class PyTorchModelIT extends ESRestTestCase {
         Request loggingSettings = new Request("PUT", "_cluster/settings");
         loggingSettings.setJsonEntity("" +
             "{" +
-            "\"transient\" : {\n" +
+            "\"persistent\" : {\n" +
             "        \"logger.org.elasticsearch.xpack.ml.inference.allocation\" : \"TRACE\",\n" +
             "        \"logger.org.elasticsearch.xpack.ml.inference.deployment\" : \"TRACE\",\n" +
             "        \"logger.org.elasticsearch.xpack.ml.process.logging\" : \"TRACE\"\n" +
@@ -131,7 +131,7 @@ public class PyTorchModelIT extends ESRestTestCase {
         Request loggingSettings = new Request("PUT", "_cluster/settings");
         loggingSettings.setJsonEntity("" +
             "{" +
-            "\"transient\" : {\n" +
+            "\"persistent\" : {\n" +
             "        \"logger.org.elasticsearch.xpack.ml.inference.allocation\" :null,\n" +
             "        \"logger.org.elasticsearch.xpack.ml.inference.deployment\" : null,\n" +
             "        \"logger.org.elasticsearch.xpack.ml.process.logging\" : null\n" +