Browse Source

[ML] rename the text structure action and update required permissions (#67388)

This renames the text structure finder action to match the plugin name.
Also, this adds a new reserved role name so that adding specific permissions for this API is simple.
Benjamin Trent 4 years ago
parent
commit
a437af3406
15 changed files with 242 additions and 76 deletions
  1. 2 1
      client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
  2. 63 63
      docs/reference/text-structure/apis/find-structure.asciidoc
  3. 1 0
      x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc
  4. 9 6
      x-pack/docs/en/security/authorization/privileges.asciidoc
  5. 4 0
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java
  6. 3 1
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java
  7. 1 1
      x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFileStructureAction.java
  8. 3 2
      x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java
  9. 1 1
      x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
  10. 1 1
      x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml
  11. 0 0
      x-pack/plugin/text-structure/qa/build.gradle
  12. 32 0
      x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle
  13. 8 0
      x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml
  14. 58 0
      x-pack/plugin/text-structure/qa/text-structure-with-security/src/yamlRestTest/java/org/elasticsearch/smoketest/TextStructureWithSecurityIT.java
  15. 56 0
      x-pack/plugin/text-structure/qa/text-structure-with-security/src/yamlRestTest/java/org/elasticsearch/smoketest/TextStructureWithSecurityInsufficientRoleIT.java

+ 2 - 1
client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java

@@ -302,6 +302,7 @@ public final class Role {
         public static final String MONITOR_TRANSFORM_DEPRECATED = "monitor_data_frame_transforms";
         public static final String MONITOR_TRANSFORM = "monitor_transform";
         public static final String MONITOR_ML = "monitor_ml";
+        public static final String MONITOR_TEXT_STRUCTURE = "monitor_text_structure";
         public static final String MONITOR_WATCHER = "monitor_watcher";
         public static final String MONITOR_ROLLUP = "monitor_rollup";
         public static final String MANAGE = "manage";
@@ -326,7 +327,7 @@ public final class Role {
         public static final String READ_ILM = "read_ilm";
         public static final String MANAGE_ENRICH = "manage_enrich";
         public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_TRANSFORM_DEPRECATED, MONITOR_TRANSFORM,
-            MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_TRANSFORM_DEPRECATED, MANAGE_TRANSFORM,
+            MONITOR_ML, MONITOR_TEXT_STRUCTURE, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_TRANSFORM_DEPRECATED, MANAGE_TRANSFORM,
             MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, READ_PIPELINE,
             TRANSPORT_CLIENT, MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_AUTOSCALING, MANAGE_CCR,
             READ_CCR, MANAGE_ILM, READ_ILM, MANAGE_ENRICH };

+ 63 - 63
docs/reference/text-structure/apis/find-structure.asciidoc

@@ -6,8 +6,8 @@
 <titleabbrev>Find structure</titleabbrev>
 ++++
 
-Finds the structure of a text file. The text file must 
-contain data that is suitable to be ingested into the 
+Finds the structure of a text file. The text file must
+contain data that is suitable to be ingested into the
 {stack}.
 
 [discrete]
@@ -16,18 +16,18 @@ contain data that is suitable to be ingested into the
 
 `POST _text_structure/find_structure`
 
-////
 [[find-structure-prereqs]]
 == {api-prereq-title}
 
-//TBD
-////
+* If the {es} {security-features} are enabled, you must have `monitor_text_structure` or
+`monitor` cluster privileges to use this API. See
+<<security-privileges>>.
 
 [discrete]
 [[find-structure-desc]]
 == {api-description-title}
 
-This API provides a starting point for ingesting data into {es} in a format that 
+This API provides a starting point for ingesting data into {es} in a format that
 is suitable for subsequent use with other {stack} functionality.
 
 Unlike other {es} endpoints, the data that is posted to this endpoint does not
@@ -60,67 +60,67 @@ chosen.
 == {api-query-parms-title}
 
 `charset`::
-(Optional, string) The file's character set. It must be a character set that is 
+(Optional, string) The file's character set. It must be a character set that is
 supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`,
 `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure
 finder chooses an appropriate character set.
 
 `column_names`::
-(Optional, string) If you have set `format` to `delimited`, you can specify the 
-column names in a comma-separated list. If this parameter is not specified, the 
-structure finder uses the column names from the header row of the file. If the 
+(Optional, string) If you have set `format` to `delimited`, you can specify the
+column names in a comma-separated list. If this parameter is not specified, the
+structure finder uses the column names from the header row of the file. If the
 file does not have a header row, columns are named "column1", "column2",
 "column3", etc.
 
 `delimiter`::
-(Optional, string) If you have set `format` to `delimited`, you can specify the 
-character used to delimit the values in each row. Only a single character is 
-supported; the delimiter cannot have multiple characters. By default, the API 
-considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). 
-In this default scenario, all rows must have the same number of fields for the 
-delimited format to be detected. If you specify a delimiter, up to 10% of the 
+(Optional, string) If you have set `format` to `delimited`, you can specify the
+character used to delimit the values in each row. Only a single character is
+supported; the delimiter cannot have multiple characters. By default, the API
+considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the
+delimited format to be detected. If you specify a delimiter, up to 10% of the
 rows can have a different number of columns than the first row.
 
 `explain`::
-(Optional, Boolean) If this parameter is set to `true`, the response includes a 
-field named `explanation`, which is an array of strings that indicate how the 
+(Optional, Boolean) If this parameter is set to `true`, the response includes a
+field named `explanation`, which is an array of strings that indicate how the
 structure finder produced its result. The default value is `false`.
 
 `format`::
 (Optional, string) The high level structure of the file. Valid values are
-`ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API 
-chooses the format. In this default scenario, all rows must have the same number 
-of fields for a delimited format to be detected. If the `format` is set to 
-`delimited` and the `delimiter` is not set, however, the API tolerates up to 5% 
+`ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API
+chooses the format. In this default scenario, all rows must have the same number
+of fields for a delimited format to be detected. If the `format` is set to
+`delimited` and the `delimiter` is not set, however, the API tolerates up to 5%
 of rows that have a different number of columns than the first row.
 
 `grok_pattern`::
 (Optional, string) If you have set `format` to `semi_structured_text`, you can
-specify a Grok pattern that is used to extract fields from every message in the 
-file. The name of the timestamp field in the Grok pattern must match what is 
-specified in the `timestamp_field` parameter. If that parameter is not 
+specify a Grok pattern that is used to extract fields from every message in the
+file. The name of the timestamp field in the Grok pattern must match what is
+specified in the `timestamp_field` parameter. If that parameter is not
 specified, the name of the timestamp field in the Grok pattern must match
-"timestamp". If `grok_pattern` is not specified, the structure finder creates a 
+"timestamp". If `grok_pattern` is not specified, the structure finder creates a
 Grok pattern.
 
 `has_header_row`::
 (Optional, Boolean) If you have set `format` to `delimited`, you can use this
-parameter to indicate whether the column names are in the first row of the file. 
-If this parameter is not specified, the structure finder guesses based on the 
+parameter to indicate whether the column names are in the first row of the file.
+If this parameter is not specified, the structure finder guesses based on the
 similarity of the first row of the file to other rows.
 
 `line_merge_size_limit`::
-(Optional, unsigned integer) The maximum number of characters in a message when 
-lines are merged to form messages while analyzing semi-structured files. The 
-default is `10000`. If you have extremely long messages you may need to increase 
-this, but be aware that this may lead to very long processing times if the way 
+(Optional, unsigned integer) The maximum number of characters in a message when
+lines are merged to form messages while analyzing semi-structured files. The
+default is `10000`. If you have extremely long messages you may need to increase
+this, but be aware that this may lead to very long processing times if the way
 to group lines into messages is misdetected.
 
 `lines_to_sample`::
 (Optional, unsigned integer) The number of lines to include in the structural
-analysis, starting from the beginning of the file. The minimum is 2; the default 
-is `1000`. If the value of this parameter is greater than the number of lines in 
-the file, the analysis proceeds (as long as there are at least two lines in the 
+analysis, starting from the beginning of the file. The minimum is 2; the default
+is `1000`. If the value of this parameter is greater than the number of lines in
+the file, the analysis proceeds (as long as there are at least two lines in the
 file) for all of the lines.
 +
 --
@@ -134,11 +134,11 @@ to request analysis of 100000 lines to achieve some variety.
 --
 
 `quote`::
-(Optional, string) If you have set `format` to `delimited`, you can specify the 
-character used to quote the values in each row if they contain newlines or the 
-delimiter character. Only a single character is supported. If this parameter is 
-not specified, the default value is a double quote (`"`). If your delimited file 
-format does not use quoting, a workaround is to set this argument to a character 
+(Optional, string) If you have set `format` to `delimited`, you can specify the
+character used to quote the values in each row if they contain newlines or the
+delimiter character. Only a single character is supported. If this parameter is
+not specified, the default value is a double quote (`"`). If your delimited file
+format does not use quoting, a workaround is to set this argument to a character
 that does not appear anywhere in the sample.
 
 `should_trim_fields`::
@@ -149,12 +149,12 @@ value is `true`. Otherwise, the default value is `false`.
 
 `timeout`::
 (Optional, <<time-units,time units>>) Sets the maximum amount of time that the
-structure analysis make take. If the analysis is still running when the timeout 
+structure analysis may take. If the analysis is still running when the timeout
 expires then it will be aborted. The default value is 25 seconds.
 
 `timestamp_field`::
-(Optional, string) The name of the field that contains the primary timestamp of 
-each record in the file. In particular, if the file were ingested into an index, 
+(Optional, string) The name of the field that contains the primary timestamp of
+each record in the file. In particular, if the file were ingested into an index,
 this is the field that would be used to populate the `@timestamp` field.
 +
 --
@@ -200,8 +200,8 @@ Only a subset of Java time format letter groups are supported:
 Additionally `S` letter groups (fractional seconds) of length one to nine are
 supported providing they occur after `ss` and separated from the `ss` by a `.`,
 `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`,
-newline and carriage return, together with literal text enclosed in single 
-quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override 
+newline and carriage return, together with literal text enclosed in single
+quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override
 format.
 
 One valuable use case for this parameter is when the format is semi-structured
@@ -531,8 +531,8 @@ If the request does not encounter errors, you receive the following result:
 // so the fields may get reordered in the JSON the endpoint sees
 
 <1> `num_lines_analyzed` indicates how many lines of the file were analyzed.
-<2> `num_messages_analyzed` indicates how many distinct messages the lines 
-contained. For NDJSON, this value is the same as `num_lines_analyzed`. For other 
+<2> `num_messages_analyzed` indicates how many distinct messages the lines
+contained. For NDJSON, this value is the same as `num_lines_analyzed`. For other
 file formats, messages can span several lines.
 <3> `sample_start` reproduces the first two messages in the file verbatim. This
 may help diagnose parse errors or accidental uploads of the wrong file.
@@ -550,11 +550,11 @@ fields. {es} mappings and ingest pipelines use this format.
 therefore be told the correct timezone by the client.
 <11> `mappings` contains some suitable mappings for an index into which the data
 could be ingested. In this case, the `release_date` field has been given a
-`keyword` type as it is not considered specific enough to convert to the `date` 
+`keyword` type as it is not considered specific enough to convert to the `date`
 type.
 <12> `field_stats` contains the most common values of each field, plus basic
-numeric statistics for the numeric `page_count` field. This information may 
-provide clues that the data needs to be cleaned or transformed prior to use by 
+numeric statistics for the numeric `page_count` field. This information may
+provide clues that the data needs to be cleaned or transformed prior to use by
 other {stack} functionality.
 
 [discrete]
@@ -1456,22 +1456,22 @@ If the request does not encounter errors, you receive the following result:
 // NOTCONSOLE
 
 <1> `num_messages_analyzed` is 2 lower than `num_lines_analyzed` because only
-data records count as messages. The first line contains the column names and in 
+data records count as messages. The first line contains the column names and in
 this sample the second line is blank.
 <2> Unlike the first example, in this case the `format` has been identified as
 `delimited`.
 <3> Because the `format` is `delimited`, the `column_names` field in the output
 lists the column names in the order they appear in the sample.
 <4> `has_header_row` indicates that for this sample the column names were in
-the first row of the sample. (If they hadn't been then it would have been a good 
+the first row of the sample. (If they hadn't been then it would have been a good
 idea to specify them in the `column_names` query parameter.)
 <5> The `delimiter` for this sample is a comma, as it's a CSV file.
-<6> The `quote` character is the default double quote. (The structure finder 
-does not attempt to deduce any other quote character, so if you have a delimited 
-file that's quoted with some other character you must specify it using the 
+<6> The `quote` character is the default double quote. (The structure finder
+does not attempt to deduce any other quote character, so if you have a delimited
+file that's quoted with some other character you must specify it using the
 `quote` query parameter.)
 <7> The `timestamp_field` has been chosen to be `tpep_pickup_datetime`.
-`tpep_dropoff_datetime` would work just as well, but `tpep_pickup_datetime` was 
+`tpep_dropoff_datetime` would work just as well, but `tpep_pickup_datetime` was
 chosen because it comes first in the column order. If you prefer
 `tpep_dropoff_datetime` then force it to be chosen using the
 `timestamp_field` query parameter.
@@ -1479,18 +1479,18 @@ chosen because it comes first in the column order. If you prefer
 <9> `java_timestamp_formats` are the Java time formats recognized in the time
 fields. {es} mappings and ingest pipelines use this format.
 <10> The timestamp format in this sample doesn't specify a timezone, so to
-accurately convert them to UTC timestamps to store in {es} it's necessary to 
-supply the timezone they relate to. `need_client_timezone` will be `false` for 
+accurately convert them to UTC timestamps to store in {es} it's necessary to
+supply the timezone they relate to. `need_client_timezone` will be `false` for
 timestamp formats that include the timezone.
 
 [discrete]
 [[find-structure-example-timeout]]
 === Setting the timeout parameter
 
-If you try to analyze a lot of data then the analysis will take a long time. If 
-you want to limit the amount of processing your {es} cluster performs for a 
-request, use the `timeout` query parameter. The analysis will be aborted and an 
-error returned when the timeout expires. For example, you can replace 20000 
+If you try to analyze a lot of data then the analysis will take a long time. If
+you want to limit the amount of processing your {es} cluster performs for a
+request, use the `timeout` query parameter. The analysis will be aborted and an
+error returned when the timeout expires. For example, you can replace 20000
 lines in the previous example with 200000 and set a 1 second timeout on the
 analysis:
 
@@ -1681,7 +1681,7 @@ this:
 <2> The `multiline_start_pattern` is set on the basis that the timestamp appears
 in the first line of each multi-line log message.
 <3> A very simple `grok_pattern` has been created, which extracts the timestamp
-and recognizable fields that appear in every analyzed message. In this case the 
+and recognizable fields that appear in every analyzed message. In this case the
 only field that was recognized beyond the timestamp was the log level.
 
 [discrete]

+ 1 - 0
x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc

@@ -91,6 +91,7 @@ A successful call returns an object with "cluster" and "index" fields.
     "monitor_ml",
     "monitor_rollup",
     "monitor_snapshot",
+    "monitor_text_structure",
     "monitor_transform",
     "monitor_watcher",
     "none",

+ 9 - 6
x-pack/docs/en/security/authorization/privileges.asciidoc

@@ -19,9 +19,6 @@ details on existing repositories and snapshots.
 `grant_api_key`::
 Privileges to create {es} API keys on behalf of other users.
 
-`monitor_snapshot`::
-Privileges to list and view details on existing repositories and snapshots.
-
 `manage`::
 Builds on `monitor` and adds cluster operations that change values in the cluster.
 This includes snapshotting, updating settings, and rerouting. It also includes
@@ -143,9 +140,6 @@ security roles of the user who created or updated them.
 All cluster read-only operations, like cluster health and state, hot threads,
 node info, node and cluster stats, and pending cluster tasks.
 
-`monitor_transform`::
-All read-only operations related to {transforms}.
-
 `monitor_ml`::
 All read-only {ml} operations, such as getting information about {dfeeds}, jobs,
 model snapshots, or results.
@@ -165,6 +159,15 @@ currently running rollup jobs and their capabilities.
 
 endif::[]
 
+`monitor_snapshot`::
+Privileges to list and view details on existing repositories and snapshots.
+
+`monitor_text_structure`::
+All read-only operations related to the <<find-structure,find structure API>>.
+
+`monitor_transform`::
+All read-only operations related to {transforms}.
+
 `monitor_watcher`::
 All read-only watcher operations, such as getting a watch and watcher stats.
 

+ 4 - 0
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java

@@ -61,6 +61,7 @@ public class ClusterPrivilegeResolver {
     private static final Set<String> GRANT_API_KEY_PATTERN = Set.of(GrantApiKeyAction.NAME + "*");
     private static final Set<String> MONITOR_PATTERN = Set.of("cluster:monitor/*");
     private static final Set<String> MONITOR_ML_PATTERN = Set.of("cluster:monitor/xpack/ml/*");
+    private static final Set<String> MONITOR_TEXT_STRUCTURE_PATTERN = Set.of("cluster:monitor/text_structure/*");
     private static final Set<String> MONITOR_TRANSFORM_PATTERN = Set.of("cluster:monitor/data_frame/*", "cluster:monitor/transform/*");
     private static final Set<String> MONITOR_WATCHER_PATTERN = Set.of("cluster:monitor/xpack/watcher/*");
     private static final Set<String> MONITOR_ROLLUP_PATTERN = Set.of("cluster:monitor/xpack/rollup/*");
@@ -96,6 +97,8 @@ public class ClusterPrivilegeResolver {
     public static final NamedClusterPrivilege MONITOR_ML = new ActionClusterPrivilege("monitor_ml", MONITOR_ML_PATTERN);
     public static final NamedClusterPrivilege MONITOR_TRANSFORM_DEPRECATED =
         new ActionClusterPrivilege("monitor_data_frame_transforms", MONITOR_TRANSFORM_PATTERN);
+    public static final NamedClusterPrivilege MONITOR_TEXT_STRUCTURE =
+        new ActionClusterPrivilege("monitor_text_structure", MONITOR_TEXT_STRUCTURE_PATTERN);
     public static final NamedClusterPrivilege MONITOR_TRANSFORM =
             new ActionClusterPrivilege("monitor_transform", MONITOR_TRANSFORM_PATTERN);
     public static final NamedClusterPrivilege MONITOR_WATCHER = new ActionClusterPrivilege("monitor_watcher", MONITOR_WATCHER_PATTERN);
@@ -151,6 +154,7 @@ public class ClusterPrivilegeResolver {
         ALL,
         MONITOR,
         MONITOR_ML,
+        MONITOR_TEXT_STRUCTURE,
         MONITOR_TRANSFORM_DEPRECATED,
         MONITOR_TRANSFORM,
         MONITOR_WATCHER,

+ 3 - 1
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java

@@ -120,7 +120,9 @@ public class ReservedRolesStore implements BiConsumer<Set<String>, ActionListene
                             // To facilitate ML UI functionality being controlled using Kibana security privileges
                             "manage_ml",
                             // The symbolic constant for this one is in SecurityActionMapper, so not accessible from X-Pack core
-                            "cluster:admin/analyze"
+                            "cluster:admin/analyze",
+                            // To facilitate using the file uploader functionality
+                            "monitor_text_structure"
                         },
                         new RoleDescriptor.IndicesPrivileges[] {
                                 RoleDescriptor.IndicesPrivileges.builder()

+ 1 - 1
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindFileStructureAction.java

@@ -31,7 +31,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 public class FindFileStructureAction extends ActionType<FindFileStructureAction.Response> {
 
     public static final FindFileStructureAction INSTANCE = new FindFileStructureAction();
-    public static final String NAME = "cluster:monitor/xpack/ml/findfilestructure";
+    public static final String NAME = "cluster:monitor/text_structure/findstructure";
 
     private FindFileStructureAction() {
         super(NAME, Response::new);

+ 3 - 2
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java

@@ -355,6 +355,9 @@ public class ReservedRolesStoreTests extends ESTestCase {
         // ML
         assertRoleHasManageMl(kibanaRole);
 
+        // Text Structure
+        assertThat(kibanaRole.cluster().check(FindFileStructureAction.NAME, request, authentication), is(true));
+
         // Application Privileges
         DeletePrivilegesRequest deleteKibanaPrivileges = new DeletePrivilegesRequest("kibana-.kibana", new String[]{ "all", "read" });
         DeletePrivilegesRequest deleteLogstashPrivileges = new DeletePrivilegesRequest("logstash", new String[]{ "all", "read" });
@@ -1301,7 +1304,6 @@ public class ReservedRolesStoreTests extends ESTestCase {
         assertThat(role.cluster().check(EvaluateDataFrameAction.NAME, request, authentication), is(true));
         assertThat(role.cluster().check(ExplainDataFrameAnalyticsAction.NAME, request, authentication), is(true));
         assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request, authentication), is(false)); // internal use only
-        assertThat(role.cluster().check(FindFileStructureAction.NAME, request, authentication), is(true));
         assertThat(role.cluster().check(FlushJobAction.NAME, request, authentication), is(true));
         assertThat(role.cluster().check(ForecastJobAction.NAME, request, authentication), is(true));
         assertThat(role.cluster().check(GetBucketsAction.NAME, request, authentication), is(true));
@@ -1371,7 +1373,6 @@ public class ReservedRolesStoreTests extends ESTestCase {
         assertThat(role.cluster().check(DeleteJobAction.NAME, request, authentication), is(false));
         assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request, authentication), is(false));
         assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request, authentication), is(false));
-        assertThat(role.cluster().check(FindFileStructureAction.NAME, request, authentication), is(true));
         assertThat(role.cluster().check(FlushJobAction.NAME, request, authentication), is(false));
         assertThat(role.cluster().check(ForecastJobAction.NAME, request, authentication), is(false));
         assertThat(role.cluster().check(GetBucketsAction.NAME, request, authentication), is(true));

+ 1 - 1
x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java

@@ -225,6 +225,7 @@ public class Constants {
         "cluster:monitor/task",
         "cluster:monitor/task/get",
         "cluster:monitor/tasks/lists",
+        "cluster:monitor/text_structure/findstructure",
         "cluster:monitor/transform/get",
         "cluster:monitor/transform/stats/get",
         "cluster:monitor/xpack/analytics/stats",
@@ -264,7 +265,6 @@ public class Constants {
         "cluster:monitor/xpack/ml/data_frame/evaluate",
         "cluster:monitor/xpack/ml/datafeeds/get",
         "cluster:monitor/xpack/ml/datafeeds/stats/get",
-        "cluster:monitor/xpack/ml/findfilestructure",
         "cluster:monitor/xpack/ml/inference/get",
         "cluster:monitor/xpack/ml/inference/stats/get",
         "cluster:monitor/xpack/ml/info/get",

+ 1 - 1
x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml

@@ -15,5 +15,5 @@ setup:
   # This is fragile - it needs to be updated every time we add a new cluster/index privilege
   # I would much prefer we could just check that specific entries are in the array, but we don't have
   # an assertion for that
-  - length: { "cluster" : 38 }
+  - length: { "cluster" : 39 }
   - length: { "index" : 19 }

+ 0 - 0
x-pack/plugin/text-structure/qa/build.gradle


+ 32 - 0
x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle

@@ -0,0 +1,32 @@
+apply plugin: 'elasticsearch.yaml-rest-test'
+
+dependencies {
+  yamlRestTestImplementation project(path: xpackModule('core'))
+  yamlRestTestImplementation project(path: xpackModule('core'), configuration: 'testArtifacts')
+  yamlRestTestImplementation project(path: xpackProject('plugin').path, configuration: 'testArtifacts')
+}
+
+// bring in text structure rest test suite
+restResources {
+  restApi {
+    // needed for template installation, etc.
+    includeCore '_common', 'indices'
+    includeXpack 'text_structure'
+  }
+  restTests {
+    includeXpack 'text_structure'
+  }
+}
+
+tasks.named("yamlRestTest").configure {
+}
+
+testClusters.all {
+  testDistribution = 'DEFAULT'
+  extraConfigFile 'roles.yml', file('roles.yml')
+  user username: "x_pack_rest_user", password: "x-pack-test-password"
+  user username: "text_structure_user", password: "x-pack-test-password", role: "minimal,monitor_text_structure"
+  user username: "no_text_structure", password: "x-pack-test-password", role: "minimal"
+  setting 'xpack.license.self_generated.type', 'trial'
+  setting 'xpack.security.enabled', 'true'
+}

+ 8 - 0
x-pack/plugin/text-structure/qa/text-structure-with-security/roles.yml

@@ -0,0 +1,8 @@
+monitor_text_structure:
+  cluster:
+    - monitor_text_structure
+minimal:
+  cluster:
+    # This is always required because the REST client uses it to find the version of
+    # Elasticsearch it's talking to
+    - cluster:monitor/main

+ 58 - 0
x-pack/plugin/text-structure/qa/text-structure-with-security/src/yamlRestTest/java/org/elasticsearch/smoketest/TextStructureWithSecurityIT.java

@@ -0,0 +1,58 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.smoketest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.test.SecuritySettingsSourceField;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.xpack.test.rest.AbstractXPackRestTest;
+
+import java.util.Collections;
+import java.util.Map;
+
+import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+
+public class TextStructureWithSecurityIT extends AbstractXPackRestTest {
+
+    private static final String TEST_ADMIN_USERNAME = "x_pack_rest_user";
+
+    public TextStructureWithSecurityIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    protected String[] getCredentials() {
+        return new String[] { "text_structure_user", "x-pack-test-password" };
+    }
+
+    @Override
+    protected Settings restClientSettings() {
+        String[] creds = getCredentials();
+        String token = basicAuthHeaderValue(creds[0], new SecureString(creds[1].toCharArray()));
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
+    }
+
+    @Override
+    protected Settings restAdminSettings() {
+        String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING);
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
+    }
+
+    protected Map<String, String> getApiCallHeaders() {
+        return Collections.singletonMap(
+            "Authorization",
+            basicAuthHeaderValue(TEST_ADMIN_USERNAME, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)
+        );
+    }
+
+    @Override
+    protected boolean isMonitoringTest() {
+        return false;
+    }
+
+}

+ 56 - 0
x-pack/plugin/text-structure/qa/text-structure-with-security/src/yamlRestTest/java/org/elasticsearch/smoketest/TextStructureWithSecurityInsufficientRoleIT.java

@@ -0,0 +1,56 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.smoketest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.section.DoSection;
+import org.elasticsearch.test.rest.yaml.section.ExecutableSection;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+
+public class TextStructureWithSecurityInsufficientRoleIT extends TextStructureWithSecurityIT {
+
+    private final ClientYamlTestCandidate testCandidate;
+
+    public TextStructureWithSecurityInsufficientRoleIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+        this.testCandidate = testCandidate;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public void test() throws IOException {
+        try {
+            // Cannot use expectThrows here because blacklisted tests will throw an
+            // InternalAssumptionViolatedException rather than an AssertionError
+            super.test();
+
+            // We should have got here if and only if no text structure endpoints were called
+            for (ExecutableSection section : testCandidate.getTestSection().getExecutableSections()) {
+                if (section instanceof DoSection) {
+                    String apiName = ((DoSection) section).getApiCallSection().getApi();
+
+                    if (apiName.startsWith("text_structure.")) {
+                        fail("call to text_structure endpoint [" + apiName + "] should have failed because of missing role");
+                    }
+                }
+            }
+        } catch (AssertionError ae) {
+            assertThat(ae.getMessage(), containsString("action [cluster:monitor/text_structure"));
+            assertThat(ae.getMessage(), containsString("returned [403 Forbidden]"));
+            assertThat(ae.getMessage(), containsString("is unauthorized for user [no_text_structure]"));
+        }
+    }
+
+    @Override
+    protected String[] getCredentials() {
+        return new String[] { "no_text_structure", "x-pack-test-password" };
+    }
+}