浏览代码

[DOCS] Change `// TESTRESPONSE[_cat]` to `// TESTRESPONSE[non_json]` (#43006)

James Rodewig 6 年之前
父节点
当前提交
655032b07e

+ 3 - 3
buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy

@@ -43,7 +43,7 @@ public class SnippetsTask extends DefaultTask {
     private static final String SKIP = /skip:([^\]]+)/
     private static final String SETUP = /setup:([^ \]]+)/
     private static final String WARNING = /warning:(.+)/
-    private static final String CAT = /(_cat)/
+    private static final String NON_JSON = /(non_json)/
     private static final String TEST_SYNTAX =
         /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING|(skip_shard_failures)) ?/
 
@@ -255,12 +255,12 @@ public class SnippetsTask extends DefaultTask {
                             substitutions = []
                         }
                         String loc = "$file:$lineNumber"
-                        parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$CAT|$SKIP) ?/) {
+                        parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$NON_JSON|$SKIP) ?/) {
                             if (it.group(1) != null) {
                                 // TESTRESPONSE[s/adsf/jkl/]
                                 substitutions.add([it.group(1), it.group(2)])
                             } else if (it.group(3) != null) {
-                                // TESTRESPONSE[_cat]
+                                // TESTRESPONSE[non_json]
                                 substitutions.add(['^', '/'])
                                 substitutions.add(['\n$', '\\\\s*/'])
                                 substitutions.add(['( +)', '$1\\\\s+'])

+ 3 - 3
docs/README.asciidoc

@@ -62,9 +62,9 @@ for its modifiers:
     them" which looks like `// TESTRESPONSE[s/\d+/$body.$_path/]`.
   * You can't use `// TESTRESPONSE` immediately after `// TESTSETUP`. Instead,
   consider using `// TEST[continued]` or rearrange your snippets.
-  * `// TESTRESPONSE[_cat]`: Add substitutions for testing `_cat` responses. Use
-  this after all other substitutions so it doesn't make other substitutions
-  difficult.
+  * `// TESTRESPONSE[non_json]`: Add substitutions for testing responses in a
+  format other than JSON. Use this after all other substitutions so it doesn't
+  make other substitutions difficult.
   * `// TESTRESPONSE[skip:reason]`: Skip the assertions specified by this
   response.
 * `// TESTSETUP`: Marks this snippet as the "setup" for all other snippets in

+ 3 - 3
docs/reference/cat.asciidoc

@@ -40,7 +40,7 @@ Might respond with:
 id                     host      ip        node
 u_n93zwxThWHi1PDBJAGAg 127.0.0.1 127.0.0.1 u_n93zw
 --------------------------------------------------
-// TESTRESPONSE[s/u_n93zw(xThWHi1PDBJAGAg)?/.+/ _cat]
+// TESTRESPONSE[s/u_n93zw(xThWHi1PDBJAGAg)?/.+/ non_json]
 
 [float]
 [[help]]
@@ -64,7 +64,7 @@ host | h | host name
 ip   |   | ip address
 node | n | node name
 --------------------------------------------------
-// TESTRESPONSE[s/[|]/[|]/ _cat]
+// TESTRESPONSE[s/[|]/[|]/ non_json]
 
 NOTE: `help` is not supported if any optional url parameter is used.
 For example `GET _cat/shards/twitter?help` or `GET _cat/indices/twi*?help`
@@ -90,7 +90,7 @@ Responds with:
 --------------------------------------------------
 127.0.0.1 9300 27 sLBaIGK
 --------------------------------------------------
-// TESTRESPONSE[s/9300 27 sLBaIGK/\\d+ \\d+ .+/ _cat]
+// TESTRESPONSE[s/9300 27 sLBaIGK/\\d+ \\d+ .+/ non_json]
 
 You can also request multiple columns using simple wildcards like
 `/_cat/thread_pool?h=ip,queue*` to get all headers (or aliases) starting

+ 1 - 1
docs/reference/cat/alias.asciidoc

@@ -49,7 +49,7 @@ alias2 test1 *      -            -
 alias3 test1 -      1            1
 alias4 test1 -      2            1,2
 --------------------------------------------------
-// TESTRESPONSE[s/[*]/[*]/ _cat]
+// TESTRESPONSE[s/[*]/[*]/ non_json]
 
 The output shows that `alias2` has configured a filter, and specific routing
 configurations in `alias3` and `alias4`.

+ 1 - 1
docs/reference/cat/allocation.asciidoc

@@ -19,7 +19,7 @@ shards disk.indices disk.used disk.avail disk.total disk.percent host      ip
      1         260b    47.3gb     43.4gb    100.7gb           46 127.0.0.1 127.0.0.1 CSUXak2
 --------------------------------------------------
 // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/]
-// TESTRESPONSE[s/CSUXak2/.+/ _cat]
+// TESTRESPONSE[s/CSUXak2/.+/ non_json]
 
 Here we can see that the single shard created has been allocated to the single
 node available.

+ 2 - 2
docs/reference/cat/count.asciidoc

@@ -19,7 +19,7 @@ Looks like:
 epoch      timestamp count
 1475868259 15:24:19  121
 --------------------------------------------------
-// TESTRESPONSE[s/1475868259 15:24:19/\\d+ \\d+:\\d+:\\d+/ _cat]
+// TESTRESPONSE[s/1475868259 15:24:19/\\d+ \\d+:\\d+:\\d+/ non_json]
 
 Or for a single index:
 
@@ -35,7 +35,7 @@ GET /_cat/count/twitter?v
 epoch      timestamp count
 1475868259 15:24:20  120
 --------------------------------------------------
-// TESTRESPONSE[s/1475868259 15:24:20/\\d+ \\d+:\\d+:\\d+/ _cat]
+// TESTRESPONSE[s/1475868259 15:24:20/\\d+ \\d+:\\d+:\\d+/ non_json]
 
 
 NOTE: The document count indicates the number of live documents and does not include deleted documents which have not yet been cleaned up by the merge process.

+ 3 - 3
docs/reference/cat/fielddata.asciidoc

@@ -52,7 +52,7 @@ Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body    544b
 Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul    480b
 --------------------------------------------------
 // TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/]
-// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ _cat]
+// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ non_json]
 
 Fields can be specified either as a query parameter, or in the URL path:
 
@@ -71,7 +71,7 @@ id                     host      ip        node    field   size
 Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body    544b
 --------------------------------------------------
 // TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/]
-// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ _cat]
+// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ non_json]
 
 And it accepts a comma delimited list:
 
@@ -91,6 +91,6 @@ Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body    544b
 Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul    480b
 --------------------------------------------------
 // TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/]
-// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ _cat]
+// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ non_json]
 
 The output shows the individual fielddata for the `body` and `soul` fields, one row per field per node.

+ 2 - 2
docs/reference/cat/health.asciidoc

@@ -17,7 +17,7 @@ epoch      timestamp cluster       status node.total node.data shards pri relo i
 1475871424 16:17:04  elasticsearch green           1         1      1   1    0    0        0             0                  -                100.0%
 --------------------------------------------------
 // TESTRESPONSE[s/1475871424 16:17:04/\\d+ \\d+:\\d+:\\d+/]
-// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0                  -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat]
+// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0                  -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ non_json]
 
 It has one option `ts` to disable the timestamping:
 
@@ -35,7 +35,7 @@ which looks like:
 cluster       status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
 elasticsearch green           1         1      1   1    0    0        0             0                  -                100.0%
 --------------------------------------------------
-// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0                  -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat]
+// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0                  -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ non_json]
 
 A common use of this command is to verify the health is consistent
 across nodes:

+ 5 - 5
docs/reference/cat/indices.asciidoc

@@ -21,7 +21,7 @@ yellow open   twitter  u8FNjxh8Rfy_awN11oDKYQ   1   1       1200            0
 green  open   twitter2 nYFWZEO7TUiOjLQXBaYJpA   1   0          0            0       260b           260b
 --------------------------------------------------
 // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/]
-// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat]
+// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ non_json]
 
 We can tell quickly how many shards make up an index, the number of
 docs, deleted docs, primary store size, and total store size (all shards including replicas).
@@ -64,7 +64,7 @@ health status index    uuid                   pri rep docs.count docs.deleted st
 yellow open   twitter  u8FNjxh8Rfy_awN11oDKYQ   1   1       1200            0     88.1kb         88.1kb
 --------------------------------------------------
 // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/]
-// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ/.+/ _cat]
+// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ/.+/ non_json]
 
 Which index has the largest number of documents?
 
@@ -84,7 +84,7 @@ yellow open   twitter  u8FNjxh8Rfy_awN11oDKYQ   1   1       1200            0
 green  open   twitter2 nYFWZEO7TUiOjLQXBaYJpA   1   0          0            0       260b           260b
 --------------------------------------------------
 // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/]
-// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat]
+// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ non_json]
 
 How many merge operations have the shards for the `twitter` completed?
 
@@ -102,7 +102,7 @@ Might look like:
 health index   pri rep docs.count mt pri.mt
 yellow twitter   1   1 1200       16     16
 --------------------------------------------------
-// TESTRESPONSE[s/16/\\d+/ _cat]
+// TESTRESPONSE[s/16/\\d+/ non_json]
 
 How much memory is used per index?
 
@@ -122,4 +122,4 @@ twitter   8.1gb
 twitter2  30.5kb
 --------------------------------------------------
 // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/]
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]

+ 1 - 1
docs/reference/cat/master.asciidoc

@@ -17,7 +17,7 @@ might respond:
 id                     host      ip        node
 YzWoH_2BT-6UjVGDyPdqYg 127.0.0.1 127.0.0.1 YzWoH_2
 --------------------------------------------------
-// TESTRESPONSE[s/YzWoH_2.+/.+/ _cat]
+// TESTRESPONSE[s/YzWoH_2.+/.+/ non_json]
 
 This information is also available via the `nodes` command, but this
 is slightly shorter when all you want to do, for example, is verify

+ 2 - 2
docs/reference/cat/nodeattrs.asciidoc

@@ -22,7 +22,7 @@ node-0 127.0.0.1 127.0.0.1 testattr test
 ...
 --------------------------------------------------
 // TESTRESPONSE[s/\.\.\.\n$/\n(.+ xpack\\.installed true\n)?\n/]
-// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ _cat]
+// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ non_json]
 // If xpack is not installed then neither ... will match anything
 // If xpack is installed then the first ... contains ml attributes
 // and the second contains xpack.installed=true
@@ -68,7 +68,7 @@ node-0 19566 testattr test
 --------------------------------------------------
 // TESTRESPONSE[s/19566/\\d*/]
 // TESTRESPONSE[s/\.\.\.\n$/\n(.+ xpack\\.installed true\n)?\n/]
-// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ _cat]
+// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ non_json]
 // If xpack is not installed then neither ... will match anything
 // If xpack is installed then the first ... contains ml attributes
 // and the second contains xpack.installed=true

+ 2 - 2
docs/reference/cat/nodes.asciidoc

@@ -18,7 +18,7 @@ ip        heap.percent ram.percent cpu load_1m load_5m load_15m node.role master
 --------------------------------------------------
 // TESTRESPONSE[s/3.07/(\\d+\\.\\d+( \\d+\\.\\d+ (\\d+\\.\\d+)?)?)?/]
 // TESTRESPONSE[s/65          99  42/\\d+ \\d+ \\d+/]
-// TESTRESPONSE[s/[*]/[*]/ s/mJw06l1/.+/ _cat]
+// TESTRESPONSE[s/[*]/[*]/ s/mJw06l1/.+/ non_json]
 
 The first few columns (`ip`, `heap.percent`, `ram.percent`, `cpu`, `load_*`) tell
 you where your nodes live and give a quick picture of performance stats.
@@ -63,7 +63,7 @@ Might look like:
 id   ip        port  v         m
 veJR 127.0.0.1 59938 {version} *
 --------------------------------------------------
-// TESTRESPONSE[s/veJR/.+/ s/59938/\\d+/ s/[*]/[*]/ _cat]
+// TESTRESPONSE[s/veJR/.+/ s/59938/\\d+/ s/[*]/[*]/ non_json]
 
 [cols="<,<,<,<,<",options="header",subs="normal"]
 |=======================================================================

+ 1 - 1
docs/reference/cat/pending_tasks.asciidoc

@@ -24,6 +24,6 @@ insertOrder timeInQueue priority source
        1690       787ms HIGH     update-mapping [foo][t]
        1691       773ms HIGH     update-mapping [foo][t]
 --------------------------------------------------
-// TESTRESPONSE[s/(\n.+)+/(\\n.+)*/ _cat]
+// TESTRESPONSE[s/(\n.+)+/(\\n.+)*/ non_json]
 // We can't assert anything about the tasks in progress here because we don't
 // know what might be in progress....

+ 1 - 1
docs/reference/cat/plugins.asciidoc

@@ -31,6 +31,6 @@ U7321H6 mapper-size             {version_qualified} The Mapper Size plugin allow
 U7321H6 store-smb               {version_qualified} The Store SMB plugin adds support for SMB stores.
 U7321H6 transport-nio           {version_qualified} The nio transport.
 ------------------------------------------------------------------------------
-// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ _cat]
+// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ non_json]
 
 We can tell quickly how many plugins per node we have and which versions.

+ 3 - 3
docs/reference/cat/recovery.asciidoc

@@ -30,7 +30,7 @@ twitter 0     13ms store done  n/a         n/a         127.0.0.1   node-0      n
 // TESTRESPONSE[s/100%/0.0%/]
 // TESTRESPONSE[s/9928/0/]
 // TESTRESPONSE[s/13ms/\\d+m?s/]
-// TESTRESPONSE[s/13/\\d+/ _cat]
+// TESTRESPONSE[s/13/\\d+/ non_json]
 
 In the above case, the source and target nodes are the same because the recovery
 type was store, i.e. they were read from local storage on node start.
@@ -57,7 +57,7 @@ twitter 0 1252ms peer done  192.168.1.1 192.168.1.2 0     100.0%  0 100.0%
 // TESTRESPONSE[s/192.168.1.2/127.0.0.1/]
 // TESTRESPONSE[s/192.168.1.1/n\/a/]
 // TESTRESPONSE[s/100.0%/0.0%/]
-// TESTRESPONSE[s/1252/\\d+/ _cat]
+// TESTRESPONSE[s/1252/\\d+/ non_json]
 
 We can see in the above listing that our twitter shard was recovered from another node.
 Notice that the recovery type is shown as `peer`. The files and bytes copied are
@@ -81,4 +81,4 @@ This will show a recovery of type snapshot in the response
 i       s t      ty       st    rep     snap   f  fp   b     bp
 twitter 0 1978ms snapshot done  twitter snap_1 79 8.0% 12086 9.0%
 --------------------------------------------------------------------------------
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]

+ 1 - 1
docs/reference/cat/repositories.asciidoc

@@ -19,6 +19,6 @@ id    type
 repo1   fs
 repo2   s3
 --------------------------------------------------
-// TESTRESPONSE[s/\nrepo2   s3// _cat]
+// TESTRESPONSE[s/\nrepo2   s3// non_json]
 
 We can quickly see which repositories are registered and their type.

+ 1 - 1
docs/reference/cat/segments.asciidoc

@@ -20,7 +20,7 @@ index shard prirep ip        segment generation docs.count docs.deleted size siz
 test  0     p      127.0.0.1 _0               0          1            0  3kb        2042 false     true       {lucene_version}   true
 test1 0     p      127.0.0.1 _0               0          1            0  3kb        2042 false     true       {lucene_version}   true
 --------------------------------------------------
-// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat]
+// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ non_json]
 
 The output shows information about index names and shard numbers in the first
 two columns.

+ 5 - 5
docs/reference/cat/shards.asciidoc

@@ -23,7 +23,7 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
 // TESTRESPONSE[s/3014/\\d+/]
 // TESTRESPONSE[s/31.1mb/\\d+(\.\\d+)?[kmg]?b/]
 // TESTRESPONSE[s/192.168.56.10/.*/]
-// TESTRESPONSE[s/H5dfFeA/node-0/ _cat]
+// TESTRESPONSE[s/H5dfFeA/node-0/ non_json]
 
 [float]
 [[index-pattern]]
@@ -49,7 +49,7 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
 // TESTRESPONSE[s/3014/\\d+/]
 // TESTRESPONSE[s/31.1mb/\\d+(\.\\d+)?[kmg]?b/]
 // TESTRESPONSE[s/192.168.56.10/.*/]
-// TESTRESPONSE[s/H5dfFeA/node-0/ _cat]
+// TESTRESPONSE[s/H5dfFeA/node-0/ non_json]
 
 
 [float]
@@ -72,7 +72,7 @@ A relocating shard will be shown as follows
 ---------------------------------------------------------------------------
 twitter 0 p RELOCATING 3014 31.1mb 192.168.56.10 H5dfFeA -> -> 192.168.56.30 bGG90GE
 ---------------------------------------------------------------------------
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 [float]
 [[states]]
@@ -95,7 +95,7 @@ You can get the initializing state in the response like this
 twitter 0 p STARTED      3014 31.1mb 192.168.56.10 H5dfFeA
 twitter 0 r INITIALIZING    0 14.3mb 192.168.56.30 bGG90GE
 ---------------------------------------------------------------------------
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 If a shard cannot be assigned, for example you've overallocated the
 number of replicas for the number of nodes in the cluster, the shard
@@ -119,7 +119,7 @@ twitter 0 r STARTED    3014 31.1mb 192.168.56.30 bGG90GE
 twitter 0 r STARTED    3014 31.1mb 192.168.56.20 I8hydUG
 twitter 0 r UNASSIGNED ALLOCATION_FAILED
 ---------------------------------------------------------------------------
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 [float]
 [[reason-unassigned]]

+ 1 - 1
docs/reference/cat/snapshots.asciidoc

@@ -26,7 +26,7 @@ snap2 SUCCESS 1445634298  23:04:58   1445634672 23:11:12     6.2m       2
 // TESTRESPONSE[s/\d+:\d+:\d+/\\d+:\\d+:\\d+/]
 // TESTRESPONSE[s/1                 4             1            5/\\d+ \\d+ \\d+ \\d+/]
 // TESTRESPONSE[s/2                10             0           10/\\d+ \\d+ \\d+ \\d+/]
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 Each snapshot contains information about when it was started and stopped.
 Start and stop timestamps are available in two formats.

+ 1 - 1
docs/reference/cat/templates.asciidoc

@@ -28,7 +28,7 @@ template0 [te*]          0
 template1 [tea*]         1
 template2 [teak*]        2     7
 --------------------------------------------------
-// TESTRESPONSE[s/\*/\\*/ s/\[/\\[/ s/\]/\\]/ _cat]
+// TESTRESPONSE[s/\*/\\*/ s/\[/\\[/ s/\]/\\]/ non_json]
 
 The output shows that there are three existing templates,
 with template2 having a version value.

+ 2 - 2
docs/reference/cat/thread_pool.asciidoc

@@ -23,7 +23,7 @@ node-0 flush               0 0 0
 node-0 write               0 0 0
 --------------------------------------------------
 // TESTRESPONSE[s/\.\.\./(node-0 \\S+ 0 0 0\n)*/]
-// TESTRESPONSE[s/\d+/\\d+/ _cat]
+// TESTRESPONSE[s/\d+/\\d+/ non_json]
 // The substitutions do two things:
 // 1. Expect any number of extra thread pools. This allows us to only list a
 //    few thread pools. The list would be super long otherwise. In addition,
@@ -107,7 +107,7 @@ which looks like:
 id                     name    active rejected completed
 0EWUhXeBQtaVGlexUeVwMg generic      0        0        70
 --------------------------------------------------
-// TESTRESPONSE[s/0EWUhXeBQtaVGlexUeVwMg/[\\w-]+/ s/\d+/\\d+/ _cat]
+// TESTRESPONSE[s/0EWUhXeBQtaVGlexUeVwMg/[\\w-]+/ s/\d+/\\d+/ non_json]
 
 Here the host columns and the active, rejected and completed suggest thread pool statistics are displayed.
 

+ 1 - 1
docs/reference/frozen-indices.asciidoc

@@ -118,5 +118,5 @@ The response looks like:
 i         sth
 twitter   true
 --------------------------------------------------
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 

+ 6 - 6
docs/reference/getting-started.asciidoc

@@ -303,7 +303,7 @@ epoch      timestamp cluster       status node.total node.data shards pri relo i
 --------------------------------------------------
 // TESTRESPONSE[s/1475247709 17:01:49  elasticsearch/\\d+ \\d+:\\d+:\\d+ integTest/]
 // TESTRESPONSE[s/0             0                  -/0             \\d+                  -/]
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 We can see that our cluster named "elasticsearch" is up with a green status.
 
@@ -333,7 +333,7 @@ ip        heap.percent ram.percent cpu load_1m load_5m load_15m node.role master
 127.0.0.1           10           5   5    4.46                        mdi      *      PB2SGZY
 --------------------------------------------------
 // TESTRESPONSE[s/10           5   5    4.46/\\d+ \\d+ \\d+ (\\d+\\.\\d+)? (\\d+\\.\\d+)? (\\d+\.\\d+)?/]
-// TESTRESPONSE[s/[*]/[*]/ s/PB2SGZY/.+/ _cat]
+// TESTRESPONSE[s/[*]/[*]/ s/PB2SGZY/.+/ non_json]
 
 Here, we can see our one node named "PB2SGZY", which is the single node that is currently in our cluster.
 
@@ -354,7 +354,7 @@ And the response:
 --------------------------------------------------
 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
 --------------------------------------------------
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 Which simply means we have no indices yet in the cluster.
 
@@ -379,7 +379,7 @@ And the response:
 health status index    uuid                   pri rep docs.count docs.deleted store.size pri.store.size
 yellow open   customer 95SQ4TSUT7mWBT7VNHH67A   1   1          0            0       260b           260b
 --------------------------------------------------
-// TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+\\.?\\d?k?b/ _cat]
+// TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+\\.?\\d?k?b/ non_json]
 
 The results of the second command tell us that we now have one index named customer and it has one primary shard and one replica (the defaults) and it contains zero documents in it.
 
@@ -471,7 +471,7 @@ And the response:
 --------------------------------------------------
 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
 --------------------------------------------------
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 Which means that the index was deleted successfully and we are now back to where we started with nothing in our cluster.
 
@@ -715,7 +715,7 @@ health status index uuid                   pri rep docs.count docs.deleted store
 yellow open   bank  l7sSYV2cQXmu6_4rJWVIww   5   1       1000            0    128.6kb        128.6kb
 --------------------------------------------------
 // TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/]
-// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ _cat]
+// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ non_json]
 
 Which means that we just successfully bulk indexed 1000 documents into the bank index.
 

+ 2 - 2
docs/reference/sql/endpoints/rest.asciidoc

@@ -30,7 +30,7 @@ Alastair Reynolds|Revelation Space    |585            |2000-03-15T00:00:00.000Z
 James S.A. Corey |Leviathan Wakes     |561            |2011-06-02T00:00:00.000Z
 --------------------------------------------------
 // TESTRESPONSE[s/\|/\\|/ s/\+/\\+/]
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 [[sql-kibana-console]]
 .Using Kibana Console
@@ -233,7 +233,7 @@ Which returns:
 Douglas Adams  |The Hitchhiker's Guide to the Galaxy|180            |1979-10-12T00:00:00.000Z
 --------------------------------------------------
 // TESTRESPONSE[s/\|/\\|/ s/\+/\\+/]
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 [[sql-rest-columnar]]
 [float]

+ 1 - 1
docs/reference/sql/getting-started.asciidoc

@@ -40,7 +40,7 @@ Dan Simmons    |Hyperion       |482            |1989-05-26T00:00:00.000Z
 Frank Herbert  |Dune           |604            |1965-06-01T00:00:00.000Z
 --------------------------------------------------
 // TESTRESPONSE[s/\|/\\|/ s/\+/\\+/]
-// TESTRESPONSE[_cat]
+// TESTRESPONSE[non_json]
 
 You can also use the <<sql-cli>>. There is a script to start it
 shipped in x-pack's bin directory: