
Generate and run tests from the docs

Adds infrastructure so that `gradle :docs:check` extracts tests from the
snippets in the documentation and executes them. This is included in
`gradle check`, so it runs on CI and during a normal build.
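
For example, the docs tests can be run on their own, or the discovered snippets
can simply be listed (the `listSnippets` task is defined in `docs/build.gradle`):

    gradle :docs:check          # extract the snippets and run the generated tests
    gradle :docs:listSnippets   # print every snippet found in the docs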

By default each `// AUTOSENSE` snippet creates its own REST test. These
tests are executed in a random order and the cluster is wiped between
each one. If multiple snippets chain together into a single test, annotate
every snippet after the first with `// TEST[continued]` and the generated
tests for all of those snippets are joined into one.
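
For illustration, a pair of snippets in a docs file might be annotated like this
(a made-up example, not copied from this commit):

    [source,js]
    --------------------------------------------------
    PUT twitter/tweet/1
    {
        "user" : "kimchy"
    }
    --------------------------------------------------
    // AUTOSENSE

    [source,js]
    --------------------------------------------------
    GET twitter/tweet/1/_source
    --------------------------------------------------
    // AUTOSENSE
    // TEST[continued]

Without `// TEST[continued]` each snippet would become its own test against a
freshly wiped cluster; with it the `GET` runs in the same generated test as the
`PUT`.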

Snippets marked as `// TESTRESPONSE` are checked against the response
of the last action.
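
Continuing the illustration above, the response of that `GET` could then be
asserted with a third snippet:

    [source,js]
    --------------------------------------------------
    {
        "user" : "kimchy"
    }
    --------------------------------------------------
    // TESTRESPONSE

and the new RestTestsFromSnippetsTask would turn the three snippets into a
single YAML REST test, roughly like this (a hand-written sketch of the
generated file, not actual build output; the test name is the snippet's
starting line number):

    ---
    "10":
      - do:
          raw:
            method: PUT
            path: "twitter/tweet/1"
            body: |
              {
                  "user" : "kimchy"
              }
      - do:
          raw:
            method: GET
            path: "twitter/tweet/1/_source"
      - response_body: |
          {
              "user" : "kimchy"
          }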

See docs/README.asciidoc for lots more.

Closes #12583. That issue is about catching bugs in the docs during the build.
This catches *some* bugs in the docs during the build, which is a good start.
Nik Everett, 9 years ago
commit 4b1c116461
65 changed files with 1381 additions and 436 deletions
  1. 214 0
      buildSrc/src/main/groovy/org/elasticsearch/gradle/RestTestsFromSnippetsTask.groovy
  2. 299 0
      buildSrc/src/main/groovy/org/elasticsearch/gradle/SnippetsTask.groovy
  3. 1 1
      core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
  4. 44 1
      docs/README.asciidoc
  5. 84 0
      docs/build.gradle
  6. 10 13
      docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
  7. 10 11
      docs/reference/cluster/tasks.asciidoc
  8. 66 33
      docs/reference/docs/index_.asciidoc
  9. 57 26
      docs/reference/docs/reindex.asciidoc
  10. 47 17
      docs/reference/docs/update-by-query.asciidoc
  11. 5 2
      docs/reference/index-modules/allocation/delayed.asciidoc
  12. 5 0
      docs/reference/index-modules/allocation/filtering.asciidoc
  13. 1 0
      docs/reference/index-modules/allocation/prioritization.asciidoc
  14. 1 2
      docs/reference/indices/analyze.asciidoc
  15. 18 14
      docs/reference/indices/flush.asciidoc
  16. 14 8
      docs/reference/indices/put-mapping.asciidoc
  17. 1 1
      docs/reference/indices/templates.asciidoc
  18. 4 3
      docs/reference/ingest.asciidoc
  19. 25 22
      docs/reference/ingest/ingest-node.asciidoc
  20. 1 0
      docs/reference/mapping.asciidoc
  21. 3 2
      docs/reference/mapping/dynamic-mapping.asciidoc
  22. 7 9
      docs/reference/mapping/dynamic/templates.asciidoc
  23. 3 4
      docs/reference/mapping/fields/all-field.asciidoc
  24. 3 13
      docs/reference/mapping/fields/field-names-field.asciidoc
  25. 4 12
      docs/reference/mapping/fields/parent-field.asciidoc
  26. 6 22
      docs/reference/mapping/fields/routing-field.asciidoc
  27. 1 0
      docs/reference/mapping/fields/timestamp-field.asciidoc
  28. 5 24
      docs/reference/mapping/fields/type-field.asciidoc
  29. 0 7
      docs/reference/mapping/params.asciidoc
  30. 7 8
      docs/reference/mapping/params/analyzer.asciidoc
  31. 14 7
      docs/reference/mapping/params/boost.asciidoc
  32. 7 6
      docs/reference/mapping/params/coerce.asciidoc
  33. 3 3
      docs/reference/mapping/params/copy-to.asciidoc
  34. 8 13
      docs/reference/mapping/params/dynamic.asciidoc
  35. 5 8
      docs/reference/mapping/params/fielddata.asciidoc
  36. 6 6
      docs/reference/mapping/params/ignore-malformed.asciidoc
  37. 1 1
      docs/reference/mapping/params/include-in-all.asciidoc
  38. 4 5
      docs/reference/mapping/params/multi-fields.asciidoc
  39. 1 2
      docs/reference/mapping/params/norms.asciidoc
  40. 5 5
      docs/reference/mapping/params/position-increment-gap.asciidoc
  41. 1 2
      docs/reference/mapping/params/properties.asciidoc
  42. 1 3
      docs/reference/mapping/params/search-analyzer.asciidoc
  43. 2 3
      docs/reference/mapping/params/store.asciidoc
  44. 1 2
      docs/reference/mapping/types/date.asciidoc
  45. 2 1
      docs/reference/mapping/types/nested.asciidoc
  46. 1 2
      docs/reference/migration/migrate_5_0/mapping.asciidoc
  47. 2 2
      docs/reference/modules/cluster/allocation_filtering.asciidoc
  48. 1 2
      docs/reference/modules/cluster/disk_allocator.asciidoc
  49. 1 1
      docs/reference/modules/node.asciidoc
  50. 18 19
      docs/reference/modules/scripting/painless.asciidoc
  51. 14 13
      docs/reference/modules/scripting/using.asciidoc
  52. 30 30
      docs/reference/query-dsl/bool-query.asciidoc
  53. 1 2
      docs/reference/query-dsl/function-score-query.asciidoc
  54. 7 4
      docs/reference/query-dsl/term-query.asciidoc
  55. 4 4
      docs/reference/setup/cluster_restart.asciidoc
  56. 4 4
      docs/reference/setup/rolling_upgrade.asciidoc
  57. 53 0
      docs/src/test/java/org/elasticsearch/smoketest/SmokeTestDocsIT.java
  58. 1 0
      settings.gradle
  59. 1 1
      test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
  60. 19 8
      test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java
  61. 16 0
      test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
  62. 3 3
      test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java
  63. 16 12
      test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
  64. 15 7
      test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
  65. 167 0
      test/framework/src/main/java/org/elasticsearch/test/rest/section/ResponseBodyAssertion.java

+ 214 - 0
buildSrc/src/main/groovy/org/elasticsearch/gradle/RestTestsFromSnippetsTask.groovy

@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle
+
+import org.elasticsearch.gradle.SnippetsTask.Snippet
+import org.gradle.api.InvalidUserDataException
+import org.gradle.api.tasks.Input
+import org.gradle.api.tasks.OutputDirectory
+
+import java.nio.file.Files
+import java.nio.file.Path
+import java.util.regex.Matcher
+
+/**
+ * Generates REST tests for each snippet marked // TEST or // AUTOSENSE.
+ */
+public class RestTestsFromSnippetsTask extends SnippetsTask {
+    @Input
+    Map<String, String> setups = new HashMap()
+
+    /**
+     * Root directory of the tests being generated. To make the REST tests happy
+     * we generate them under outputRoot(), which is contained in this directory.
+     */
+    @OutputDirectory
+    File testRoot = project.file('build/rest')
+
+    public RestTestsFromSnippetsTask() {
+        project.afterEvaluate {
+            // Wait to set this so testRoot can be customized
+            project.sourceSets.test.output.dir(testRoot, builtBy: this)
+        }
+        TestBuilder builder = new TestBuilder()
+        doFirst { outputRoot().delete() }
+        perSnippet builder.&handleSnippet
+        doLast builder.&finishLastTest
+    }
+
+    /**
+     * Root directory containing all the files generated by this task. It is
+     * contained within testRoot.
+     */
+    File outputRoot() {
+        return new File(testRoot, '/rest-api-spec/test')
+    }
+
+    private class TestBuilder {
+        private static final String SYNTAX = {
+            String method = /(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)/
+            String pathAndQuery = /(?<pathAndQuery>[^\n]+)/
+            String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|#/
+            String body = /(?<body>(?:\n(?!$badBody)[^\n]+)+)/
+            String nonComment = /$method\s+$pathAndQuery$body?/
+            String comment = /(?<comment>#.+)/
+            /(?:$comment|$nonComment)\n+/
+        }()
+
+        /**
+         * The file in which we saw the last snippet that made a test.
+         */
+        Path lastDocsPath
+
+        /**
+         * The file we're building.
+         */
+        PrintWriter current
+
+        /**
+         * Called each time a snippet is encountered. Tracks the snippets and
+         * calls buildTest to actually build the test.
+         */
+        void handleSnippet(Snippet snippet) {
+            if (snippet.testSetup) {
+                setup(snippet)
+                return
+            }
+            if (snippet.testResponse) {
+                response(snippet)
+                return
+            }
+            if (snippet.test || snippet.autoSense) {
+                test(snippet)
+                return
+            }
+            // Must be an unmarked snippet....
+        }
+
+        private void test(Snippet test) {
+            setupCurrent(test)
+
+            if (false == test.continued) {
+                current.println('---')
+                current.println("\"$test.start\":")
+            }
+            if (test.skipTest) {
+                current.println("  - skip:")
+                current.println("      features: always_skip")
+                current.println("      reason: $test.skipTest")
+            }
+            if (test.setup != null) {
+                String setup = setups[test.setup]
+                if (setup == null) {
+                    throw new InvalidUserDataException("Couldn't find setup "
+                        + "for $test")
+                }
+                current.println(setup)
+            }
+
+            body(test)
+        }
+
+        private void response(Snippet response) {
+            current.println("  - response_body: |")
+            response.contents.eachLine { current.println("      $it") }
+        }
+
+        void emitDo(String method, String pathAndQuery,
+                String body, String catchPart) {
+            def (String path, String query) = pathAndQuery.tokenize('?')
+            current.println("  - do:")
+            if (catchPart != null) {
+                current.println("      catch: $catchPart")
+            }
+            current.println("      raw:")
+            current.println("        method: $method")
+            current.println("        path: \"$path\"")
+            if (query != null) {
+                for (String param: query.tokenize('&')) {
+                    def (String name, String value) = param.tokenize('=')
+                    current.println("        $name: \"$value\"")
+                }
+            }
+            if (body != null) {
+                // Throw out the leading newline we get from parsing the body
+                body = body.substring(1)
+                current.println("        body: |")
+                body.eachLine { current.println("          $it") }
+            }
+        }
+
+        private void setup(Snippet setup) {
+            if (lastDocsPath == setup.path) {
+                throw new InvalidUserDataException("$setup: wasn't first")
+            }
+            setupCurrent(setup)
+            current.println('---')
+            current.println("setup:")
+            body(setup)
+        }
+
+        private void body(Snippet snippet) {
+            parse("$snippet", snippet.contents, SYNTAX) { matcher, last ->
+                if (matcher.group("comment") != null) {
+                    // Comment
+                    return
+                }
+                String method = matcher.group("method")
+                String pathAndQuery = matcher.group("pathAndQuery")
+                String body = matcher.group("body")
+                String catchPart = last ? snippet.catchPart : null
+                if (pathAndQuery.startsWith('/')) {
+                    // Why not do some light linting while we're here?
+                    throw new InvalidUserDataException(
+                        "Path shouldn't start with a '/': $snippet\n"
+                        + snippet.contents)
+                }
+                emitDo(method, pathAndQuery, body, catchPart)
+            }
+        }
+
+        private PrintWriter setupCurrent(Snippet test) {
+            if (lastDocsPath == test.path) {
+                return
+            }
+            finishLastTest()
+            lastDocsPath = test.path
+
+            // Make the destination file:
+            // Shift the path into the destination directory tree
+            Path dest = outputRoot().toPath().resolve(test.path)
+            // Replace the extension
+            String fileName = dest.getName(dest.nameCount - 1)
+            dest = dest.parent.resolve(fileName.replace('.asciidoc', '.yaml'))
+
+            // Now setup the writer
+            Files.createDirectories(dest.parent)
+            current = dest.newPrintWriter()
+        }
+
+        void finishLastTest() {
+            if (current != null) {
+                current.close()
+                current = null
+            }
+        }
+    }
+}

+ 299 - 0
buildSrc/src/main/groovy/org/elasticsearch/gradle/SnippetsTask.groovy

@@ -0,0 +1,299 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle
+
+import org.gradle.api.DefaultTask
+import org.gradle.api.InvalidUserDataException
+import org.gradle.api.file.ConfigurableFileTree
+import org.gradle.api.tasks.InputFiles
+import org.gradle.api.tasks.TaskAction
+
+import java.nio.file.Path
+import java.util.regex.Matcher
+
+/**
+ * A task which will run a closure on each snippet in the documentation.
+ */
+public class SnippetsTask extends DefaultTask {
+    private static final String SCHAR = /(?:\\\/|[^\/])/
+    private static final String SUBSTITUTION = /s\/($SCHAR+)\/($SCHAR*)\//
+    private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/
+    private static final String SKIP = /skip:([^\]]+)/
+    private static final String SETUP = /setup:([^ \]]+)/
+    private static final String TEST_SYNTAX =
+        /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP) ?/
+
+    /**
+     * Action to take on each snippet. Called with a single parameter, an
+     * instance of Snippet.
+     */
+    Closure perSnippet
+
+    /**
+     * The docs to scan. Defaults to every file in the directory except the
+     * build.gradle file because that is appropriate for Elasticsearch's docs
+     * directory.
+     */
+    @InputFiles
+    ConfigurableFileTree docs = project.fileTree(project.projectDir) {
+        // No snippets in the build file
+        exclude 'build.gradle'
+    }
+
+    @TaskAction
+    public void executeTask() {
+        /*
+         * Walks each line of each file, building snippets as it encounters
+         * the lines that make up the snippet.
+         */
+        for (File file: docs) {
+            String lastLanguage
+            int lastLanguageLine
+            Snippet snippet = null
+            StringBuilder contents = null
+            List substitutions = null
+            Closure emit = {
+                snippet.contents = contents.toString()
+                contents = null
+                if (substitutions != null) {
+                    substitutions.each { String pattern, String subst ->
+                        /*
+                         * $body is really common but it looks like a
+                         * backreference so we just escape it here to make the
+                         * tests cleaner.
+                         */
+                        subst = subst.replace('$body', '\\$body')
+                        // \n is a new line....
+                        subst = subst.replace('\\n', '\n')
+                        snippet.contents = snippet.contents.replaceAll(
+                            pattern, subst)
+                    }
+                    substitutions = null
+                }
+                perSnippet(snippet)
+                snippet = null
+            }
+            file.eachLine('UTF-8') { String line, int lineNumber ->
+                Matcher matcher
+                if (line ==~ /-{4,}\s*/) { // Four or more dashes delimit a snippet
+                    if (snippet == null) {
+                        Path path = docs.dir.toPath().relativize(file.toPath())
+                        snippet = new Snippet(path: path, start: lineNumber)
+                        if (lastLanguageLine == lineNumber - 1) {
+                            snippet.language = lastLanguage
+                        }
+                    } else {
+                        snippet.end = lineNumber
+                    }
+                    return
+                }
+                matcher = line =~ /\[source,(\w+)]\s*/
+                if (matcher.matches()) {
+                    lastLanguage = matcher.group(1)
+                    lastLanguageLine = lineNumber
+                    return
+                }
+                if (line ==~ /\/\/ AUTOSENSE\s*/) {
+                    if (snippet == null) {
+                        throw new InvalidUserDataException("AUTOSENSE not " +
+                            "paired with a snippet at $file:$lineNumber")
+                    }
+                    snippet.autoSense = true
+                    return
+                }
+                matcher = line =~ /\/\/ TEST(\[(.+)\])?\s*/
+                if (matcher.matches()) {
+                    if (snippet == null) {
+                        throw new InvalidUserDataException("TEST not " +
+                            "paired with a snippet at $file:$lineNumber")
+                    }
+                    snippet.test = true
+                    if (matcher.group(2) != null) {
+                        String loc = "$file:$lineNumber"
+                        parse(loc, matcher.group(2), TEST_SYNTAX) {
+                            if (it.group(1) != null) {
+                                snippet.catchPart = it.group(1)
+                                return
+                            }
+                            if (it.group(2) != null) {
+                                if (substitutions == null) {
+                                    substitutions = []
+                                }
+                                substitutions.add([it.group(2), it.group(3)])
+                                return
+                            }
+                            if (it.group(4) != null) {
+                                snippet.skipTest = it.group(4)
+                                return
+                            }
+                            if (it.group(5) != null) {
+                                snippet.continued = true
+                                return
+                            }
+                            if (it.group(6) != null) {
+                                snippet.setup = it.group(6)
+                                return
+                            }
+                            throw new InvalidUserDataException(
+                                    "Invalid test marker: $line")
+                        }
+                    }
+                    return
+                }
+                matcher = line =~ /\/\/ TESTRESPONSE(\[(.+)\])?\s*/
+                if (matcher.matches()) {
+                    if (snippet == null) {
+                        throw new InvalidUserDataException("TESTRESPONSE not " +
+                            "paired with a snippet at $file:$lineNumber")
+                    }
+                    snippet.testResponse = true
+                    if (matcher.group(2) != null) {
+                        substitutions = []
+                        String loc = "$file:$lineNumber"
+                        parse(loc, matcher.group(2), /$SUBSTITUTION ?/) {
+                            substitutions.add([it.group(1), it.group(2)])
+                        }
+                    }
+                    return
+                }
+                if (line ==~ /\/\/ TESTSETUP\s*/) {
+                    snippet.testSetup = true
+                    return
+                }
+                if (snippet == null) {
+                    // Outside
+                    return
+                }
+                if (snippet.end == Snippet.NOT_FINISHED) {
+                    // Inside
+                    if (contents == null) {
+                        contents = new StringBuilder()
+                    }
+                    // We don't need the annotations
+                    line = line.replaceAll(/<\d+>/, '')
+                    // Nor any trailing spaces
+                    line = line.replaceAll(/\s+$/, '')
+                    contents.append(line).append('\n')
+                    return
+                }
+                // Just finished
+                emit()
+            }
+            if (snippet != null) emit()
+        }
+    }
+
+    static class Snippet {
+        static final int NOT_FINISHED = -1
+
+        /**
+         * Path to the file containing this snippet. Relative to docs.dir of the
+         * SnippetsTask that created it.
+         */
+        Path path
+        int start
+        int end = NOT_FINISHED
+        String contents
+
+        boolean autoSense = false
+        boolean test = false
+        boolean testResponse = false
+        boolean testSetup = false
+        String skipTest = null
+        boolean continued = false
+        String language = null
+        String catchPart = null
+        String setup = null
+
+        @Override
+        public String toString() {
+            String result = "$path[$start:$end]"
+            if (language != null) {
+                result += "($language)"
+            }
+            if (autoSense) {
+                result += '// AUTOSENSE'
+            }
+            if (test) {
+                result += '// TEST'
+                if (catchPart) {
+                    result += "[catch: $catchPart]"
+                }
+                if (skipTest) {
+                    result += "[skip=$skipTest]"
+                }
+                if (continued) {
+                    result += '[continued]'
+                }
+                if (setup) {
+                    result += "[setup:$setup]"
+                }
+            }
+            if (testResponse) {
+                result += '// TESTRESPONSE'
+            }
+            if (testSetup) {
+                result += '// TESTSETUP'
+            }
+            return result
+        }
+    }
+
+    /**
+     * Repeatedly match the pattern to the string, calling the closure with the
+     * matchers each time there is a match. If there are characters that don't
+     * match then blow up. If the closure takes two parameters then the second
+     * one is "is this the last match?".
+     */
+    protected parse(String location, String s, String pattern, Closure c) {
+        if (s == null) {
+            return // Silly null, only real stuff gets to match!
+        }
+        Matcher m = s =~ pattern
+        int offset = 0
+        Closure extraContent = { message ->
+            StringBuilder cutOut = new StringBuilder()
+            cutOut.append(s[offset - 6..offset - 1])
+            cutOut.append('*')
+            cutOut.append(s[offset..Math.min(offset + 5, s.length() - 1)])
+            String cutOutNoNl = cutOut.toString().replace('\n', '\\n')
+            throw new InvalidUserDataException("$location: Extra content "
+                + "$message ('$cutOutNoNl') matching [$pattern]: $s")
+        }
+        while (m.find()) {
+            if (m.start() != offset) {
+                extraContent("between [$offset] and [${m.start()}]")
+            }
+            offset = m.end()
+            if (c.maximumNumberOfParameters == 1) {
+                c(m)
+            } else {
+                c(m, offset == s.length())
+            }
+        }
+        if (offset == 0) {
+            throw new InvalidUserDataException("$location: Didn't match "
+                + "$pattern: $s")
+        }
+        if (offset != s.length()) {
+            extraContent("after [$offset]")
+        }
+    }
+}

+ 1 - 1
core/src/main/java/org/elasticsearch/index/mapper/MapperService.java

@@ -92,7 +92,7 @@ public class MapperService extends AbstractIndexComponent {
             Setting.longSetting("index.mapping.depth.limit", 20L, 1, Property.Dynamic, Property.IndexScope);
     public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
     public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING =
-        Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope);
+        Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.Dynamic, Property.IndexScope);
     private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(
             "_uid", "_id", "_type", "_all", "_parent", "_routing", "_index",
             "_size", "_timestamp", "_ttl"

+ 44 - 1
docs/README.asciidoc

@@ -1,4 +1,47 @@
-The Elasticsearch docs are in AsciiDoc format and can be built using the Elasticsearch documentation build process
+The Elasticsearch docs are in AsciiDoc format and can be built using the
+Elasticsearch documentation build process.
 
 See: https://github.com/elastic/docs
 
+Snippets marked with `// AUTOSENSE` are automatically annotated with "VIEW IN
+SENSE" in the documentation and are automatically tested by the command
+`gradle :docs:check`. By default each `// AUTOSENSE` snippet runs as its own isolated
+test. You can manipulate the test execution in the following ways:
+
+* `// TEST`: Explicitly marks a snippet as a test. Snippets marked this way
+are tests even if they don't have `// AUTOSENSE`.
+  * `// TEST[s/foo/bar/]`: Replace `foo` with `bar` in the test. This should be
+  used sparingly because it makes the test "lie". Sometimes, though, you can use
+  it to make the tests more clear.
+  * `// TEST[catch:foo]`: Used to expect errors in the requests. Replace `foo`
+  with `request` to expect a 400 error, for example. If the snippet contains
+  multiple requests then only the last request will expect the error.
+  * `// TEST[continued]`: Continue the test started in the last snippet. Between
+  tests the nodes are cleaned: indexes are removed, etc. This will prevent that.
+  This is really useful when you have text and snippets that work together to
+  tell the story of some use case because it merges the snippets (and thus the
+  use case) into one big test.
+  * `// TEST[skip:reason]`: Skip this test. Replace `reason` with the actual
+  reason to skip the test. Snippets without `// TEST` or `// AUTOSENSE` aren't
+  considered tests anyway but this is useful for explicitly documenting the
+  reason why the test shouldn't be run.
+  * `// TEST[setup:name]`: Run some setup code before running the snippet. This
+  is useful for creating and populating indexes used in the snippet. The setup
+  code is defined in `docs/build.gradle`.
+* `// TESTRESPONSE`: Matches this snippet against the body of the response of
+  the last test. If the response is JSON then order is ignored. With
+  `// TEST[continued]` you can make tests that contain multiple command snippets
+  and multiple response snippets.
+  * `// TESTRESPONSE[s/foo/bar/]`: Substitutions. See `// TEST[s/foo/bar/]`.
+* `// TESTSETUP`: Marks this snippet as the "setup" for all other snippets in
+  this file. This is a somewhat natural way of structuring documentation. You
+  say "this is the data we use to explain this feature" then you add the
+  snippet that you mark `// TESTSETUP` and then every snippet will turn into
+  a test that runs the setup snippet first. See the "painless" docs for a file
+  that puts this to good use. This is fairly similar to `// TEST[setup:name]`
+  but rather than the setup defined in `docs/build.gradle` the setup is defined
+  right in the documentation file.
+
+Anywhere you can use JSON you can use elements like `$body.path.to.thing`,
+which is replaced on the fly with the contents of the thing at `path.to.thing`
+in the last response.

+ 84 - 0
docs/build.gradle

@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.SnippetsTask
+import org.elasticsearch.gradle.SnippetsTask.Snippet
+import org.elasticsearch.gradle.RestTestsFromSnippetsTask
+
+apply plugin: 'elasticsearch.rest-test'
+
+task listSnippets(type: SnippetsTask) {
+  group 'Docs'
+  description 'List each snippet'
+  perSnippet { println(it) }
+}
+
+task listAutoSenseCandidates(type: SnippetsTask) {
+  group 'Docs'
+  description 'List snippets that probably should be marked // AUTOSENSE'
+  perSnippet {
+    if (
+             it.autoSense    // Already marked, nothing to do
+          || it.testResponse // Only commands are autosense
+        ) {
+      return
+    }
+    List<String> languages = [
+      'js', 'json', // These languages should almost always be marked autosense
+      'sh', 'shell', // These are often curl commands that should be converted
+    ]
+    if (false == languages.contains(it.language)) {
+      return
+    }
+    println(it)
+  }
+}
+
+task buildRestTests(type: RestTestsFromSnippetsTask) {
+  docs = fileTree(project.projectDir) {
+    // No snippets in here!
+    exclude 'build.gradle'
+    // Remove plugins because they aren't installed during this test. Yet?
+    exclude 'plugins'
+    // This file simply doesn't pass yet. We should figure out how to fix it.
+    exclude 'reference/modules/snapshots.asciidoc'
+  }
+  Closure setupTwitter = { String name, int count ->
+    setups[name] = '''
+  - do:
+      bulk:
+        index: twitter
+        type: tweet
+        refresh: true
+        body: |'''
+    for (int i = 0; i < count; i++) {
+      setups[name] += """
+          {"index":{}}
+          {"msg": "some message with the number $i", "date": $i}"""
+    }
+  }
+  setupTwitter('twitter', 5)
+  setupTwitter('big_twitter', 120)
+}
+
+integTest {
+  cluster {
+    setting 'script.inline', 'true'
+  }
+}

+ 10 - 13
docs/reference/analysis/analyzers/pattern-analyzer.asciidoc

@@ -33,9 +33,7 @@ before running each example.
 
 [source,js]
 --------------------------------------------------
-DELETE test
-
-PUT /test
+PUT test
 {
   "settings": {
     "analysis": {
@@ -49,8 +47,9 @@ PUT /test
   }
 }
 
-GET /test/_analyze?analyzer=whitespace&text=foo,bar baz
+GET _cluster/health?wait_for_status=yellow
 
+GET test/_analyze?analyzer=whitespace&text=foo,bar baz
 # "foo,bar", "baz"
 --------------------------------------------------
 // AUTOSENSE
@@ -60,9 +59,7 @@ GET /test/_analyze?analyzer=whitespace&text=foo,bar baz
 
 [source,js]
 --------------------------------------------------
-DELETE test
-
-PUT /test
+PUT test
 {
   "settings": {
     "analysis": {
@@ -76,10 +73,12 @@ PUT /test
   }
 }
 
-GET /test/_analyze?analyzer=nonword&text=foo,bar baz
+GET _cluster/health?wait_for_status=yellow
+
+GET test/_analyze?analyzer=nonword&text=foo,bar baz
 # "foo,bar baz" becomes "foo", "bar", "baz"
 
-GET /test/_analyze?analyzer=nonword&text=type_1-type_4
+GET test/_analyze?analyzer=nonword&text=type_1-type_4
 # "type_1","type_4"
 --------------------------------------------------
 // AUTOSENSE
@@ -90,9 +89,7 @@ GET /test/_analyze?analyzer=nonword&text=type_1-type_4
 
 [source,js]
 --------------------------------------------------
-DELETE test
-
-PUT /test?pretty=1
+PUT test?pretty=1
 {
   "settings": {
     "analysis": {
@@ -106,7 +103,7 @@ PUT /test?pretty=1
   }
 }
 
-GET /test/_analyze?analyzer=camel&text=MooseX::FTPClass2_beta
+GET test/_analyze?analyzer=camel&text=MooseX::FTPClass2_beta
 # "moose","x","ftp","class","2","beta"
 --------------------------------------------------
 // AUTOSENSE

+ 10 - 11
docs/reference/cluster/tasks.asciidoc

@@ -11,9 +11,9 @@ executing on one or more nodes in the cluster.
 
 [source,js]
 --------------------------------------------------
-GET /_tasks <1>
-GET /_tasks?nodes=nodeId1,nodeId2 <2>
-GET /_tasks?nodes=nodeId1,nodeId2&actions=cluster:* <3>
+GET _tasks <1>
+GET _tasks?nodes=nodeId1,nodeId2 <2>
+GET _tasks?nodes=nodeId1,nodeId2&actions=cluster:* <3>
 --------------------------------------------------
 // AUTOSENSE
 
@@ -56,7 +56,6 @@ The result will look similar to the following:
     }
   }
 }
-
 --------------------------------------------------
 
 It is also possible to retrieve information for a particular task, or all children of a particular
@@ -64,8 +63,8 @@ tasks using the following two commands:
 
 [source,js]
 --------------------------------------------------
-GET /_tasks/taskId1
-GET /_tasks?parent_task_id=parentTaskId1
+GET _tasks/taskId:1
+GET _tasks?parent_task_id=parentTaskId:1
 --------------------------------------------------
 // AUTOSENSE
 
@@ -74,7 +73,7 @@ block for 10 seconds or until the task with id `oTUltX4IQMOUUVeiohTt8A:12345` is
 
 [source,js]
 --------------------------------------------------
-GET /_tasks/oTUltX4IQMOUUVeiohTt8A:12345?wait_for_completion=true&timeout=10s
+GET _tasks/oTUltX4IQMOUUVeiohTt8A:12345?wait_for_completion=true&timeout=10s
 --------------------------------------------------
 // AUTOSENSE
 
@@ -83,7 +82,7 @@ as the standard list tasks command.
 
 [source,js]
 --------------------------------------------------
-GET /_cat/tasks
+GET _cat/tasks
 --------------------------------------------------
 // AUTOSENSE
 
@@ -94,7 +93,7 @@ If a long-running task supports cancellation, it can be cancelled by the followi
 
 [source,js]
 --------------------------------------------------
-POST /_tasks/taskId1/_cancel
+POST _tasks/taskId:1/_cancel
 --------------------------------------------------
 // AUTOSENSE
 
@@ -104,7 +103,7 @@ nodes `nodeId1` and `nodeId2`.
 
 [source,js]
 --------------------------------------------------
-POST /_tasks/_cancel?node_id=nodeId1,nodeId2&actions=*reindex
+POST _tasks/_cancel?node_id=nodeId1,nodeId2&actions=*reindex
 --------------------------------------------------
 // AUTOSENSE
 
@@ -117,6 +116,6 @@ The following command will change the grouping to parent tasks:
 
 [source,js]
 --------------------------------------------------
-GET /_tasks?group_by=parents
+GET _tasks?group_by=parents
 --------------------------------------------------
 // AUTOSENSE

+ 66 - 33
docs/reference/docs/index_.asciidoc

@@ -7,12 +7,14 @@ into the "twitter" index, under a type called "tweet" with an id of 1:
 
 [source,js]
 --------------------------------------------------
-$ curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{
+PUT twitter/tweet/1
+{
     "user" : "kimchy",
     "post_date" : "2009-11-15T14:12:12",
     "message" : "trying out Elasticsearch"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 The result of the above index operation is:
 
@@ -20,9 +22,9 @@ The result of the above index operation is:
 --------------------------------------------------
 {
     "_shards" : {
-        "total" : 10,
+        "total" : 2,
         "failed" : 0,
-        "successful" : 10
+        "successful" : 2
     },
     "_index" : "twitter",
     "_type" : "tweet",
@@ -31,6 +33,7 @@ The result of the above index operation is:
     "created" : true
 }
 --------------------------------------------------
+// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
 
 The `_shards` header provides information about the replication process of the index operation.
 
@@ -90,10 +93,13 @@ meantime (when reading in order to update, it is recommended to set
 
 [source,js]
 --------------------------------------------------
-curl -XPUT 'localhost:9200/twitter/tweet/1?version=2' -d '{
+PUT twitter/tweet/1?version=2
+{
     "message" : "elasticsearch now has versioning support, double cool!"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
+// TEST[catch: conflict]
 
 *NOTE:* versioning is completely real time, and is not affected by the
 near real time aspects of search operations. If no version is provided,
@@ -160,23 +166,27 @@ Here is an example of using the `op_type` parameter:
 
 [source,js]
 --------------------------------------------------
-$ curl -XPUT 'http://localhost:9200/twitter/tweet/1?op_type=create' -d '{
+PUT twitter/tweet/1?op_type=create
+{
     "user" : "kimchy",
     "post_date" : "2009-11-15T14:12:12",
     "message" : "trying out Elasticsearch"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 Another option to specify `create` is to use the following uri:
 
 [source,js]
 --------------------------------------------------
-$ curl -XPUT 'http://localhost:9200/twitter/tweet/1/_create' -d '{
+PUT twitter/tweet/1/_create
+{
     "user" : "kimchy",
     "post_date" : "2009-11-15T14:12:12",
     "message" : "trying out Elasticsearch"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 [float]
 === Automatic ID Generation
@@ -188,18 +198,25 @@ will automatically be set to `create`. Here is an example (note the
 
 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/tweet/' -d '{
+POST twitter/tweet/
+{
     "user" : "kimchy",
     "post_date" : "2009-11-15T14:12:12",
     "message" : "trying out Elasticsearch"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 The result of the above index operation is:
 
 [source,js]
 --------------------------------------------------
 {
+    "_shards" : {
+        "total" : 2,
+        "failed" : 0,
+        "successful" : 2
+    },
     "_index" : "twitter",
     "_type" : "tweet",
     "_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32",
@@ -207,6 +224,7 @@ The result of the above index operation is:
     "created" : true
 }
 --------------------------------------------------
+// TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/]
 
 [float]
 [[index-routing]]
@@ -219,12 +237,14 @@ on a per-operation basis using the `routing` parameter. For example:
 
 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/tweet?routing=kimchy' -d '{
+POST twitter/tweet?routing=kimchy
+{
     "user" : "kimchy",
     "post_date" : "2009-11-15T14:12:12",
     "message" : "trying out Elasticsearch"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 In the example above, the "tweet" document is routed to a shard based on
 the `routing` parameter provided: "kimchy".
@@ -245,10 +265,24 @@ For example:
 
 [source,js]
 --------------------------------------------------
-$ curl -XPUT localhost:9200/blogs/blog_tag/1122?parent=1111 -d '{
+PUT blogs
+{
+  "mappings": {
+    "tag_parent": {},
+    "blog_tag": {
+      "_parent": {
+        "type": "tag_parent"
+      }
+    }
+  }
+}
+
+PUT blogs/blog_tag/1122?parent=1111
+{
     "tag" : "something"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 When indexing a child document, the routing value is automatically set
 to be the same as its parent, unless the routing value is explicitly
@@ -266,11 +300,13 @@ parameter. For example:
 
 [source,js]
 --------------------------------------------------
-$ curl -XPUT localhost:9200/twitter/tweet/1?timestamp=2009-11-15T14%3A12%3A12 -d '{
+PUT twitter/tweet/1?timestamp=2009-11-15T14:12:12
+{
     "user" : "kimchy",
     "message" : "trying out Elasticsearch"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 If the `timestamp` value is not provided externally or in the `_source`,
 the `timestamp` will be automatically set to the date the document was
@@ -295,28 +331,23 @@ as shown in the following examples:
 
 [source,js]
 --------------------------------------------------
-curl -XPUT 'http://localhost:9200/twitter/tweet/1?ttl=86400000' -d '{
-    "user": "kimchy",
-    "message": "Trying out elasticsearch, so far so good?"
-}'
---------------------------------------------------
-
-[source,js]
---------------------------------------------------
-curl -XPUT 'http://localhost:9200/twitter/tweet/1?ttl=1d' -d '{
+PUT twitter/tweet/1?ttl=86400000ms
+{
     "user": "kimchy",
     "message": "Trying out elasticsearch, so far so good?"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 [source,js]
 --------------------------------------------------
-curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{
-    "_ttl": "1d",
+PUT twitter/tweet/1?ttl=1d
+{
     "user": "kimchy",
     "message": "Trying out elasticsearch, so far so good?"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE
 
 More information can be found on the
 <<mapping-ttl-field,_ttl mapping page>>.
@@ -392,9 +423,11 @@ to 5 minutes:
 
 [source,js]
 --------------------------------------------------
-$ curl -XPUT 'http://localhost:9200/twitter/tweet/1?timeout=5m' -d '{
+PUT twitter/tweet/1?timeout=5m
+{
     "user" : "kimchy",
     "post_date" : "2009-11-15T14:12:12",
     "message" : "trying out Elasticsearch"
-}'
+}
 --------------------------------------------------
+// AUTOSENSE

+ 57 - 26
docs/reference/docs/reindex.asciidoc

@@ -8,7 +8,7 @@ This will copy documents from the `twitter` index into the `new_twitter` index:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "twitter"
@@ -19,20 +19,29 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:big_twitter]
 
 That will return something like this:
 
 [source,js]
 --------------------------------------------------
 {
-  "took" : 639,
-  "updated": 112,
-  "batches": 130,
+  "took" : 147,
+  "timed_out": false,
+  "created": 120,
+  "updated": 0,
+  "batches": 2,
   "version_conflicts": 0,
-  "failures" : [ ],
-  "created": 12344
+  "noops": 0,
+  "retries": 0,
+  "throttled_millis": 0,
+  "requests_per_second": "unlimited",
+  "throttled_until_millis": 0,
+  "total": 120,
+  "failures" : [ ]
 }
 --------------------------------------------------
+// TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/]
 
 Just like <<docs-update-by-query,`_update_by_query`>>, `_reindex` gets a
 snapshot of the source index but its target must be a **different** index so
@@ -44,7 +53,7 @@ the same type and id:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "twitter"
@@ -56,6 +65,7 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 Setting `version_type` to `external` will cause Elasticsearch to preserve the
 `version` from the source, create any documents that are missing, and update
@@ -64,7 +74,7 @@ in the source index:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "twitter"
@@ -76,6 +86,7 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 Settings `op_type` to `create` will cause `_reindex` to only create missing
 documents in the target index. All existing documents will cause a version
@@ -83,7 +94,7 @@ conflict:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "twitter"
@@ -95,13 +106,14 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 By default version conflicts abort the `_reindex` process but you can just
 count them by settings `"conflicts": "proceed"` in the request body:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "conflicts": "proceed",
   "source": {
@@ -114,13 +126,14 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 You can limit the documents by adding a type to the `source` or by adding a
 query. This will only copy ++tweet++&apos;s made by `kimchy` into `new_twitter`:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "twitter",
@@ -137,6 +150,7 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 `index` and `type` in `source` can both be lists, allowing you to copy from
 lots of sources in one request. This will copy documents from the `tweet` and
@@ -148,7 +162,7 @@ which document will survive because the iteration order isn't well defined.
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": ["twitter", "blog"],
@@ -160,6 +174,7 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT twitter\nPUT blog\n/]
 
 It's also possible to limit the number of processed documents by setting
 `size`. This will only copy a single document from `twitter` to
@@ -167,7 +182,7 @@ It's also possible to limit the number of processed documents by setting
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "size": 1,
   "source": {
@@ -179,6 +194,7 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 If you want a particular set of documents from the twitter index you'll
 need to sort. Sorting makes the scroll less efficient but in some contexts
@@ -187,7 +203,7 @@ This will copy 10000 documents from `twitter` into `new_twitter`:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "size": 10000,
   "source": {
@@ -200,6 +216,7 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 Like `_update_by_query`, `_reindex` supports a script that modifies the
 document. Unlike `_update_by_query`, the script is allowed to modify the
@@ -207,21 +224,22 @@ document's metadata. This example bumps the version of the source document:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
-    "index": "twitter",
+    "index": "twitter"
   },
   "dest": {
     "index": "new_twitter",
     "version_type": "external"
-  }
+  },
   "script": {
-    "internal": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"
+    "script": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"
   }
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 Think of the possibilities! Just be careful! With great power.... You can
 change:
@@ -264,7 +282,7 @@ routing set to `cat`.
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "source",
@@ -281,13 +299,14 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT source\n/]
 
 By default `_reindex` uses scroll batches of 100. You can change the
 batch size with the `size` field in the `source` element:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "source",
@@ -300,13 +319,14 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT source\n/]
 
 Reindex can also use the <<ingest>> feature by specifying a
 `pipeline` like this:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex
+POST _reindex
 {
   "source": {
     "index": "source"
@@ -318,6 +338,7 @@ POST /_reindex
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT source\n/]
 
 [float]
 === URL Parameters
@@ -414,7 +435,7 @@ While Reindex is running you can fetch their status using the
 
 [source,js]
 --------------------------------------------------
-GET /_tasks/?pretty&detailed=true&actions=*reindex
+GET _tasks/?pretty&detailed=true&actions=*reindex
 --------------------------------------------------
 // AUTOSENSE
 
@@ -473,7 +494,7 @@ Any Reindex can be canceled using the <<tasks,Task Cancel API>>:
 
 [source,js]
 --------------------------------------------------
-POST /_tasks/{task_id}/_cancel
+POST _tasks/taskid:1/_cancel
 --------------------------------------------------
 // AUTOSENSE
 
@@ -492,7 +513,7 @@ the `_rethrottle` API:
 
 [source,js]
 --------------------------------------------------
-POST /_reindex/{task_id}/_rethrottle?requests_per_second=unlimited
+POST _reindex/taskid:1/_rethrottle?requests_per_second=unlimited
 --------------------------------------------------
 // AUTOSENSE
 
@@ -540,6 +561,7 @@ POST _reindex?pretty
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 Now you can get the new document:
 
@@ -548,15 +570,24 @@ Now you can get the new document:
 GET test2/test/1?pretty
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 and it'll look like:
 
 [source,js]
 --------------------------------------------------
 {
-  "text": "words words",
-  "tag": "foo"
+  "found": true,
+  "_id": "1",
+  "_index": "test2",
+  "_type": "test",
+  "_version": 1,
+  "_source": {
+    "text": "words words",
+    "tag": "foo"
+  }
 }
 --------------------------------------------------
+// TESTRESPONSE
 
 Or you can search by `tag` or whatever you want.

+ 47 - 17
docs/reference/docs/update-by-query.asciidoc

@@ -10,22 +10,31 @@ mapping change. Here is the API:
 
 [source,js]
 --------------------------------------------------
-POST /twitter/_update_by_query?conflicts=proceed
+POST twitter/_update_by_query?conflicts=proceed
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:big_twitter]
 
 That will return something like this:
 
 [source,js]
 --------------------------------------------------
 {
-  "took" : 639,
-  "updated": 1235,
-  "batches": 13,
-  "version_conflicts": 2,
+  "took" : 147,
+  "timed_out": false,
+  "updated": 120,
+  "batches": 2,
+  "version_conflicts": 0,
+  "noops": 0,
+  "retries": 0,
+  "throttled_millis": 0,
+  "requests_per_second": "unlimited",
+  "throttled_until_millis": 0,
+  "total": 120,
   "failures" : [ ]
 }
 --------------------------------------------------
+// TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/]
 
 `_update_by_query` gets a snapshot of the index when it starts and indexes what
 it finds using `internal` versioning. That means that you'll get a version
@@ -53,9 +62,10 @@ will only update `tweet`s from the `twitter` index:
 
 [source,js]
 --------------------------------------------------
-POST /twitter/tweet/_update_by_query?conflicts=proceed
+POST twitter/tweet/_update_by_query?conflicts=proceed
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 You can also limit `_update_by_query` using the
 <<query-dsl,Query DSL>>. This will update all documents from the
@@ -63,7 +73,7 @@ You can also limit `_update_by_query` using the
 
 [source,js]
 --------------------------------------------------
-POST /twitter/_update_by_query?conflicts=proceed
+POST twitter/_update_by_query?conflicts=proceed
 {
   "query": { <1>
     "term": {
@@ -73,6 +83,7 @@ POST /twitter/_update_by_query?conflicts=proceed
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 <1> The query must be passed as a value to the `query` key, in the same
 way as the <<search-search,Search API>>. You can also use the `q`
@@ -86,7 +97,7 @@ will increment the `likes` field on all of kimchy's tweets:
 
 [source,js]
 --------------------------------------------------
-POST /twitter/_update_by_query
+POST twitter/_update_by_query
 {
   "script": {
     "inline": "ctx._source.likes++"
@@ -99,6 +110,7 @@ POST /twitter/_update_by_query
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 Just as in <<docs-update,Update API>> you can set `ctx.op = "noop"` if
 your script decides that it doesn't have to make any changes. That will cause
@@ -119,36 +131,50 @@ types at once, just like the search API:
 
 [source,js]
 --------------------------------------------------
-POST /twitter,blog/tweet,post/_update_by_query
+POST twitter,blog/tweet,post/_update_by_query
 --------------------------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT twitter\nPUT blog\n/]
 
 If you provide `routing` then the routing is copied to the scroll query,
 limiting the process to the shards that match that routing value:
 
 [source,js]
 --------------------------------------------------
-POST /twitter/_update_by_query?routing=1
+POST twitter/_update_by_query?routing=1
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 By default `_update_by_query` uses scroll batches of 100. You can change the
 batch size with the `scroll_size` URL parameter:
 
 [source,js]
 --------------------------------------------------
-POST /twitter/_update_by_query?scroll_size=1000
+POST twitter/_update_by_query?scroll_size=1000
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 `_update_by_query` can also use the <<ingest>> feature by
 specifying a `pipeline` like this:
 
 [source,js]
 --------------------------------------------------
-POST /twitter/_update_by_query?pipeline=some_ingest_pipeline
+PUT _ingest/pipeline/set-foo
+{
+  "description" : "sets foo",
+  "processors" : [ {
+      "set" : {
+        "field": "foo",
+        "value": "bar"
+      }
+  } ]
+}
+POST twitter/_update_by_query?pipeline=set-foo
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 [float]
 === URL Parameters
@@ -240,7 +266,7 @@ While Update By Query is running you can fetch their status using the
 
 [source,js]
 --------------------------------------------------
-POST /_tasks/?pretty&detailed=true&action=*byquery
+GET _tasks/?pretty&detailed=true&action=*byquery
 --------------------------------------------------
 // AUTOSENSE
 
@@ -299,7 +325,7 @@ Any Update By Query can be canceled using the <<tasks,Task Cancel API>>:
 
 [source,js]
 --------------------------------------------------
-POST /_tasks/{task_id}/_cancel
+POST _tasks/taskid:1/_cancel
 --------------------------------------------------
 // AUTOSENSE
 
@@ -318,7 +344,7 @@ using the `_rethrottle` API:
 
 [source,js]
 --------------------------------------------------
-POST /_update_by_query/{task_id}/_rethrottle?requests_per_second=unlimited
+POST _update_by_query/taskid:1/_rethrottle?requests_per_second=unlimited
 --------------------------------------------------
 // AUTOSENSE
 
@@ -356,12 +382,12 @@ POST test/test?refresh
 {
   "text": "words words",
   "flag": "bar"
-}'
+}
 POST test/test?refresh
 {
   "text": "words words",
   "flag": "foo"
-}'
+}
 PUT test/_mapping/test   <2>
 {
   "properties": {
@@ -391,6 +417,7 @@ POST test/_search?filter_path=hits.total
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 [source,js]
 --------------------------------------------------
@@ -400,6 +427,7 @@ POST test/_search?filter_path=hits.total
   }
 }
 --------------------------------------------------
+// TESTRESPONSE
 
 But you can issue an `_update_by_query` request to pick up the new mapping:
 
@@ -416,6 +444,7 @@ POST test/_search?filter_path=hits.total
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 [source,js]
 --------------------------------------------------
@@ -425,5 +454,6 @@ POST test/_search?filter_path=hits.total
   }
 }
 --------------------------------------------------
+// TESTRESPONSE
 
 You can do the exact same thing when adding a field to a multifield.
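
The `// TEST[s/^/PUT twitter\n/]` annotations used in this file read like sed substitutions applied to a snippet before it becomes a generated REST test; the details live in the Gradle snippets task, but the effect is roughly this sketch (the snippet string and class name are illustrative only):

[source,java]
----------------------------------------------------------------
// Rough sketch only: a s/^/PUT twitter\n/ substitution prepends an index-creation
// request to the snippet so the generated test has an index to run against.
public class SubstitutionSketch {
    public static void main(String[] args) {
        String snippet = "POST twitter/_update_by_query?scroll_size=1000";
        String prepared = snippet.replaceFirst("^", "PUT twitter\n");
        System.out.println(prepared);
        // PUT twitter
        // POST twitter/_update_by_query?scroll_size=1000
    }
}
----------------------------------------------------------------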

+ 5 - 2
docs/reference/index-modules/allocation/delayed.asciidoc

@@ -38,7 +38,7 @@ This setting can be updated on a live index (or on all indices):
 
 [source,js]
 ------------------------------
-PUT /_all/_settings
+PUT _all/_settings
 {
   "settings": {
     "index.unassigned.node_left.delayed_timeout": "5m"
@@ -46,6 +46,7 @@ PUT /_all/_settings
 }
 ------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT test\n/]
 
 With delayed allocation enabled, the above scenario changes to look like this:
 
@@ -82,6 +83,7 @@ can be viewed with the <<cluster-health,cluster health API>>:
 ------------------------------
 GET _cluster/health <1>
 ------------------------------
+// AUTOSENSE
 <1> This request will return a `delayed_unassigned_shards` value.
 
 ==== Removing a node permanently
@@ -92,7 +94,7 @@ the missing shards immediately, just update the timeout to zero:
 
 [source,js]
 ------------------------------
-PUT /_all/_settings
+PUT _all/_settings
 {
   "settings": {
     "index.unassigned.node_left.delayed_timeout": "0"
@@ -100,5 +102,6 @@ PUT /_all/_settings
 }
 ------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT test\n/]
 
 You can reset the timeout as soon as the missing shards have started to recover.

+ 5 - 0
docs/reference/index-modules/allocation/filtering.asciidoc

@@ -23,6 +23,7 @@ These metadata attributes can be used with the
 group of nodes.  For instance, we can move the index `test` to either `big` or
 `medium` nodes as follows:
 
+
 [source,js]
 ------------------------
 PUT test/_settings
@@ -31,6 +32,7 @@ PUT test/_settings
 }
 ------------------------
 // AUTOSENSE
+// TEST[s/^/PUT test\n/]
 
 Alternatively, we can move the index `test` away from the `small` nodes with
 an `exclude` rule:
@@ -43,6 +45,7 @@ PUT test/_settings
 }
 ------------------------
 // AUTOSENSE
+// TEST[s/^/PUT test\n/]
 
 Multiple rules can be specified, in which case all conditions must be
 satisfied.  For instance, we could move the index `test` to `big` nodes in
@@ -57,6 +60,7 @@ PUT test/_settings
 }
 ------------------------
 // AUTOSENSE
+// TEST[s/^/PUT test\n/]
 
 NOTE: If some conditions cannot be satisfied then shards will not be moved.
 
@@ -97,3 +101,4 @@ PUT test/_settings
 }
 ------------------------
 // AUTOSENSE
+// TEST[skip:indexes don't assign]

+ 1 - 0
docs/reference/index-modules/allocation/prioritization.asciidoc

@@ -53,3 +53,4 @@ PUT index_4/_settings
 }
 ------------------------------
 // AUTOSENSE
+// TEST[continued]

+ 1 - 2
docs/reference/indices/analyze.asciidoc

@@ -112,7 +112,7 @@ experimental[The format of the additional detail information is experimental and
 
 [source,js]
 --------------------------------------------------
-GET test/_analyze
+GET _analyze
 {
   "tokenizer" : "standard",
   "token_filter" : ["snowball"],
@@ -172,4 +172,3 @@ The request returns the following result:
 }
 --------------------------------------------------
 <1> Output only "keyword" attribute, since specify "attributes" in the request.
-

+ 18 - 14
docs/reference/indices/flush.asciidoc

@@ -10,9 +10,10 @@ trigger flush operations as required in order to clear memory.
 
 [source,js]
 --------------------------------------------------
-POST /twitter/_flush
+POST twitter/_flush
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 [float]
 [[flush-parameters]]
@@ -23,7 +24,7 @@ The flush API accepts the following request parameters:
 [horizontal]
 `wait_if_ongoing`::  If set to `true` the flush operation will block until the
 flush can be executed if another flush operation is already executing.
-The default is `false` and will cause an exception to be thrown on 
+The default is `false` and will cause an exception to be thrown on
 the shard level if another flush operation is already running.
 
 `force`:: Whether a flush should be forced even if it is not necessarily needed ie.
@@ -40,11 +41,12 @@ or even on `_all` the indices.
 
 [source,js]
 --------------------------------------------------
-POST /kimchy,elasticsearch/_flush
+POST kimchy,elasticsearch/_flush
 
-POST /_flush
+POST _flush
 --------------------------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
 
 [[indices-synced-flush]]
 === Synced Flush
@@ -72,9 +74,10 @@ the <<indices-stats,indices stats>> API:
 
 [source,sh]
 --------------------------------------------------
-GET /twitter/_stats/commit?level=shards
+GET twitter/_stats/commit?level=shards
 --------------------------------------------------
 // AUTOSENSE
+// TEST[s/^/PUT twitter\n/]
 
 
 which returns something similar to:
@@ -136,9 +139,10 @@ NOTE: It is harmless to request a synced flush while there is ongoing indexing.
 
 [source,sh]
 --------------------------------------------------
-POST /twitter/_flush/synced
+POST twitter/_flush/synced
 --------------------------------------------------
 // AUTOSENSE
+// TEST[setup:twitter]
 
 The response contains details about how many shards were successfully sync-flushed and information about any failure.
 
@@ -149,18 +153,18 @@ sync-flushed:
 --------------------------------------------------
 {
    "_shards": {
-      "total": 4,
-      "successful": 4,
+      "total": 10,
+      "successful": 10,
       "failed": 0
    },
    "twitter": {
-      "total": 4,
-      "successful": 4,
+      "total": 10,
+      "successful": 10,
       "failed": 0
    }
 }
 --------------------------------------------------
-
+// TESTRESPONSE[s/"successful": 10/"successful": 5/]
 
 Here is what it looks like when one shard group failed due to pending operations:
 
@@ -230,8 +234,8 @@ or even on `_all` the indices.
 
 [source,js]
 --------------------------------------------------
-POST /kimchy,elasticsearch/_flush/synced
+POST kimchy,elasticsearch/_flush/synced
 
-POST /_flush/synced
+POST _flush/synced
 --------------------------------------------------
-// AUTOSENSE
+// AUTOSENSE

+ 14 - 8
docs/reference/indices/put-mapping.asciidoc

@@ -135,9 +135,9 @@ exists in more than one type will throw an exception, unless you specify the
 across all fields with the same name in the same index.
 
 TIP: The only parameters which are exempt from this rule -- they can be set to
-different values on each field -- can be found in  <<field-conflicts>>.
+different values on each field -- can be found in <<field-conflicts>>.
 
-For example:
+For example, this fails:
 
 [source,js]
 -----------------------------------
@@ -173,8 +173,17 @@ PUT my_index/_mapping/type_one <2>
     }
   }
 }
+-----------------------------------
+// AUTOSENSE
+// TEST[catch:request]
+<1> Create an index with two types, both of which contain a `text` field with the same mapping.
+<2> Trying to update the `search_analyzer` just for `type_one` throws an exception like `"Merge failed with failures..."`.
+
+But then running this succeeds:
 
-PUT my_index/_mapping/type_one?update_all_types <3>
+[source,js]
+-----------------------------------
+PUT my_index/_mapping/type_one?update_all_types <1>
 {
   "properties": {
     "text": {
@@ -186,8 +195,5 @@ PUT my_index/_mapping/type_one?update_all_types <3>
 }
 -----------------------------------
 // AUTOSENSE
-<1> Create an index with two types, both of which contain a `text` field which have the same mapping.
-<2> Trying to update the `search_analyzer` just for `type_one` throws an exception like `"Merge failed with failures..."`.
-<3> Adding the `update_all_types` parameter updates the `text` field in `type_one` and `type_two`.
-
-
+// TEST[continued]
+<1> Adding the `update_all_types` parameter updates the `text` field in `type_one` and `type_two`.

+ 1 - 1
docs/reference/indices/templates.asciidoc

@@ -13,7 +13,7 @@ For example:
 
 [source,js]
 --------------------------------------------------
-PUT /_template/template_1
+PUT _template/template_1
 {
   "template": "te*",
   "settings": {

+ 4 - 3
docs/reference/ingest.asciidoc

@@ -25,15 +25,16 @@ tell the ingest node which pipeline to use. For example:
 
 [source,js]
 --------------------------------------------------
-PUT /my-index/my-type/my-id?pipeline=my_pipeline_id
+PUT my-index/my-type/my-id?pipeline=my_pipeline_id
 {
-  ...
+  "foo": "bar"
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[catch:request]
 
 See <<ingest-apis,Ingest APIs>> for more information about creating, adding, and deleting pipelines.
 
 --
 
-include::ingest/ingest-node.asciidoc[]
+include::ingest/ingest-node.asciidoc[]

+ 25 - 22
docs/reference/ingest/ingest-node.asciidoc

@@ -41,10 +41,11 @@ PUT _ingest/pipeline/my-pipeline-id
   "description" : "describe pipeline",
   "processors" : [
     {
-      "simple" : {
-        // settings
+      "set" : {
+        "field": "foo",
+        "value": "bar"
       }
-    },
+    }
     // other processors
   ]
 }
@@ -64,28 +65,31 @@ The get pipeline API returns pipelines based on ID. This API always returns a lo
 GET _ingest/pipeline/my-pipeline-id
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 Example response:
 
 [source,js]
 --------------------------------------------------
 {
-   "my-pipeline-id": {
-      "_source" : {
-        "description": "describe pipeline",
-        "processors": [
-          {
-            "simple" : {
-              // settings
-            }
-          },
-          // other processors
-        ]
-      },
-      "_version" : 0
-   }
+  "pipelines": [ {
+    "id": "my-pipeline-id",
+    "config": {
+      "description": "describe pipeline",
+      "processors": [
+        {
+          "set" : {
+            "field": "foo",
+            "value": "bar"
+          }
+        }
+        // other processors
+      ]
+    }
+  } ]
 }
 --------------------------------------------------
+// TESTRESPONSE
 
 For each returned pipeline, the source and the version are returned.
 The version is useful for knowing which version of the pipeline the node has.
@@ -101,6 +105,7 @@ The delete pipeline API deletes pipelines by ID.
 DELETE _ingest/pipeline/my-pipeline-id
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 [[simulate-pipeline-api]]
 === Simulate Pipeline API
@@ -674,9 +679,9 @@ Specifying `boolean` will set the field to true if its string value is equal to
 false if its string value is equal to `false` (ignore case), or it will throw an exception otherwise.
 
 Specifying `auto` will attempt to convert the string-valued `field` into the closest non-string type.
-For example, a field whose value is `"true"` will be converted to its respective boolean type: `true`. And 
-a value of `"242.15"` will "automatically" be converted to `242.15` of type `float`. If a provided field cannot 
-be appropriately converted, the Convert Processor will still process successfully and leave the field value as-is. In 
+For example, a field whose value is `"true"` will be converted to its respective boolean type: `true`. And
+a value of `"242.15"` will "automatically" be converted to `242.15` of type `float`. If a provided field cannot
+be appropriately converted, the Convert Processor will still process successfully and leave the field value as-is. In
 such a case, `target_field` will still be updated with the unconverted field value.
 
 [[convert-options]]
@@ -1320,5 +1325,3 @@ Converts a string to its uppercase equivalent.
   }
 }
 --------------------------------------------------
-
-

+ 1 - 0
docs/reference/mapping.asciidoc

@@ -140,6 +140,7 @@ PUT my_index <1>
       }
     },
     "blogpost": { <2>
+      "_all":       { "enabled": false  }, <3>
       "properties": { <4>
         "title":    { "type": "text"  }, <5>
         "body":     { "type": "text"  }, <5>

+ 3 - 2
docs/reference/mapping/dynamic-mapping.asciidoc

@@ -46,12 +46,14 @@ setting to `false`, either by setting the default value in the
 
 [source,js]
 --------------------------------------------------
-PUT /_settings <1>
+PUT data/_settings <1>
 {
   "index.mapper.dynamic":false
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
+
 <1> Disable automatic type creation for all indices.
 
 Regardless of the value of this setting, types can still be added explicitly
@@ -64,4 +66,3 @@ include::dynamic/default-mapping.asciidoc[]
 include::dynamic/field-mapping.asciidoc[]
 
 include::dynamic/templates.asciidoc[]
-

+ 7 - 9
docs/reference/mapping/dynamic/templates.asciidoc

@@ -347,7 +347,7 @@ PUT my_index
 --------------------------------------------------
 
 The sub `keyword` field appears in this template to be consistent with the
-default rules of dynamic mappings. Of course if you do not need them because 
+default rules of dynamic mappings. Of course if you do not need them because
 you don't need to perform exact search or aggregate on this field, you could
 remove it as described in the previous section.
 
@@ -406,14 +406,12 @@ new indices, you could create the following index template:
 --------------------------------------------------
 PUT _template/disable_all_field
 {
-  "disable_all_field": {
-    "order": 0,
-    "template": "*", <1>
-    "mappings": {
-      "_default_": { <2>
-        "_all": { <3>
-          "enabled": false
-        }
+  "order": 0,
+  "template": "*", <1>
+  "mappings": {
+    "_default_": { <2>
+      "_all": { <3>
+        "enabled": false
       }
     }
   }

+ 3 - 4
docs/reference/mapping/fields/all-field.asciidoc

@@ -116,6 +116,7 @@ PUT my_index
 }
 --------------------------------
 // AUTOSENSE
+// TEST[s/\.\.\.//]
 
 <1> The `_all` field in `type_1` is enabled.
 <2> The `_all` field in `type_2` is completely disabled.
@@ -143,7 +144,7 @@ PUT my_index
   },
   "settings": {
     "index.query.default_field": "content" <2>
-  },
+  }
 }
 --------------------------------
 // AUTOSENSE
@@ -337,7 +338,7 @@ GET _search
   "highlight": {
     "fields": {
       "*_name": { <2>
-        "require_field_match": "false"  <3>
+        "require_field_match": false  <3>
       }
     }
   }
@@ -348,5 +349,3 @@ GET _search
 <1> The query inspects the `_all` field to find matching documents.
 <2> Highlighting is performed on the two name fields, which are available from the `_source`.
 <3> The query wasn't run against the name fields, so set `require_field_match` to `false`.
-
-

+ 3 - 13
docs/reference/mapping/fields/field-names-field.asciidoc

@@ -6,8 +6,7 @@ contains any value other than `null`.  This field is used by the
 <<query-dsl-exists-query,`exists`>> query to find documents that
 either have or don't have any non-+null+ value for a particular field.
 
-The value of the `_field_name` field is accessible in queries, aggregations, and
-scripts:
+The value of the `_field_names` field is accessible in queries and scripts:
 
 [source,js]
 --------------------------
@@ -30,17 +29,9 @@ GET my_index/_search
       "_field_names": [ "title" ] <1>
     }
   },
-  "aggs": {
-    "Field names": {
-      "terms": {
-        "field": "_field_names", <2>
-        "size": 10
-      }
-    }
-  },
   "script_fields": {
     "Field names": {
-      "script": "doc['_field_names']" <3>
+      "script": "doc['_field_names']" <2>
     }
   }
 }
@@ -49,5 +40,4 @@ GET my_index/_search
 // AUTOSENSE
 
 <1> Querying on the `_field_names` field (also see the <<query-dsl-exists-query,`exists`>> query)
-<2> Aggregating on the `_field_names` field
-<3> Accessing the `_field_names` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)
+<2> Accessing the `_field_names` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)

+ 4 - 12
docs/reference/mapping/fields/parent-field.asciidoc

@@ -59,8 +59,8 @@ See the <<query-dsl-has-child-query,`has_child`>> and
 the <<search-aggregations-bucket-children-aggregation,`children`>> aggregation,
 and <<parent-child-inner-hits,inner hits>> for more information.
 
-The value of the `_parent` field is accessible in queries, aggregations, scripts,
-and when sorting:
+The value of the `_parent` field is accessible in queries, aggregations,
+and scripts:
 
 [source,js]
 --------------------------
@@ -79,13 +79,6 @@ GET my_index/_search
       }
     }
   },
-  "sort": [
-    {
-      "_parent": { <3>
-        "order": "desc"
-      }
-    }
-  ],
   "script_fields": {
     "parent": {
       "script": "doc['_parent']" <4>
@@ -94,11 +87,11 @@ GET my_index/_search
 }
 --------------------------
 // AUTOSENSE
+// TEST[continued]
 
 <1> Querying on the `_parent` field (also see the <<query-dsl-has-parent-query,`has_parent` query>> and the <<query-dsl-has-child-query,`has_child` query>>)
 <2> Aggregating on the `_parent` field (also see the <<search-aggregations-bucket-children-aggregation,`children`>> aggregation)
-<3> Sorting on the `_parent` field
-<4> Accessing the `_parent` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)
+<3> Accessing the `_parent` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)
 
 
 ==== Parent-child restrictions
@@ -158,4 +151,3 @@ GET _stats/fielddata?human&fields=_parent
 GET _nodes/stats/indices/fielddata?human&fields=_parent
 --------------------------------------------------
 // AUTOSENSE
-

+ 6 - 22
docs/reference/mapping/fields/routing-field.asciidoc

@@ -22,14 +22,14 @@ PUT my_index/my_type/1?routing=user1 <1>
 GET my_index/my_type/1?routing=user1 <2>
 ------------------------------
 // AUTOSENSE
+// TESTSETUP
 
 <1> This document uses `user1` as its routing value, instead of its ID.
 <2> The same `routing` value needs to be provided when
     <<docs-get,getting>>, <<docs-delete,deleting>>, or <<docs-update,updating>>
     the document.
 
-The value of the `_routing` field is accessible in queries, aggregations, scripts,
-and when sorting:
+The value of the `_routing` field is accessible in queries and scripts:
 
 [source,js]
 --------------------------
@@ -40,21 +40,6 @@ GET my_index/_search
       "_routing": [ "user1" ] <1>
     }
   },
-  "aggs": {
-    "Routing values": {
-      "terms": {
-        "field": "_routing", <2>
-        "size": 10
-      }
-    }
-  },
-  "sort": [
-    {
-      "_routing": { <3>
-        "order": "desc"
-      }
-    }
-  ],
   "script_fields": {
     "Routing value": {
       "script": "doc['_routing']" <4>
@@ -65,9 +50,7 @@ GET my_index/_search
 // AUTOSENSE
 
 <1> Querying on the `_routing` field (also see the <<query-dsl-ids-query,`ids` query>>)
-<2> Aggregating on the `_routing` field
-<3> Sorting on the `_routing` field
-<4> Accessing the `_routing` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)
+<2> Accessing the `_routing` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)
 
 
 ==== Searching with custom routing
@@ -104,7 +87,7 @@ custom `routing` value required for all CRUD operations:
 
 [source,js]
 ------------------------------
-PUT my_index
+PUT my_index2
 {
   "mappings": {
     "my_type": {
@@ -115,12 +98,13 @@ PUT my_index
   }
 }
 
-PUT my_index/my_type/1 <2>
+PUT my_index2/my_type/1 <2>
 {
   "text": "No routing value provided"
 }
 ------------------------------
 // AUTOSENSE
+// TEST[catch:request]
 <1> Routing is required for `my_type` documents.
 <2> This index request throws a `routing_missing_exception`.
 

+ 1 - 0
docs/reference/mapping/fields/timestamp-field.asciidoc

@@ -89,6 +89,7 @@ GET my_index/_search
 }
 --------------------------
 // AUTOSENSE
+// TEST[continued]
 
 <1> Querying on the `_timestamp` field
 <2> Aggregating on the `_timestamp` field

+ 5 - 24
docs/reference/mapping/fields/type-field.asciidoc

@@ -1,12 +1,11 @@
-[[mapping-type-field]]
+[[mapping-type-field]]
 === `_type` field
 
 Each document indexed is associated with a <<mapping-type-field,`_type`>> (see
 <<mapping-type>>) and an <<mapping-id-field,`_id`>>.  The `_type` field is
 indexed in order to make searching by type name fast.
 
-The value of the `_type` field is accessible in queries, aggregations,
-scripts, and when sorting:
+The value of the `_type` field is accessible in queries and scripts:
 
 [source,js]
 --------------------------
@@ -21,31 +20,16 @@ PUT my_index/type_2/2
   "text": "Document with type 2"
 }
 
-GET my_index/_search/type_*
+GET my_index/type_*/_search
 {
   "query": {
     "terms": {
       "_type": [ "type_1", "type_2" ] <1>
     }
   },
-  "aggs": {
-    "types": {
-      "terms": {
-        "field": "_type", <2>
-        "size": 10
-      }
-    }
-  },
-  "sort": [
-    {
-      "_type": { <3>
-        "order": "desc"
-      }
-    }
-  ],
   "script_fields": {
     "type": {
-      "script": "doc['_type']" <4>
+      "script": "doc['_type']" <2>
     }
   }
 }
@@ -54,7 +38,4 @@ GET my_index/_search/type_*
 // AUTOSENSE
 
 <1> Querying on the `_type` field
-<2> Aggregating on the `_type` field
-<3> Sorting on the `_type` field
-<4> Accessing the `_type` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)
-
+<2> Accessing the `_type` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)

+ 0 - 7
docs/reference/mapping/params.asciidoc

@@ -91,10 +91,3 @@ include::params/similarity.asciidoc[]
 include::params/store.asciidoc[]
 
 include::params/term-vector.asciidoc[]
-
-
-[source,js]
---------------------------------------------------
---------------------------------------------------
-// AUTOSENSE
-

+ 7 - 8
docs/reference/mapping/params/analyzer.asciidoc

@@ -80,7 +80,7 @@ GET my_index/_analyze?field=text.english <4>
 [[search-quote-analyzer]]
 ==== `search_quote_analyzer`
 
-The `search_quote_analyzer` setting allows you to specify an analyzer for phrases, this is particularly useful when dealing with disabling 
+The `search_quote_analyzer` setting allows you to specify an analyzer for phrases. This is particularly useful when disabling
 stop words for phrase queries.
 
 To disable stop words for phrases a field utilising three analyzer settings will be required:
@@ -91,7 +91,7 @@ To disable stop words for phrases a field utilising three analyzer settings will
 
 [source,js]
 --------------------------------------------------
-PUT /my_index
+PUT my_index
 {
    "settings":{
       "analysis":{
@@ -128,7 +128,6 @@ PUT /my_index
                "analyzer":"my_analyzer", <3>
                "search_analyzer":"my_stop_analyzer", <4>
                "search_quote_analyzer":"my_analyzer" <5>
-              }
             }
          }
       }
@@ -162,10 +161,10 @@ GET my_index/my_type/_search
 <2> `my_stop_analyzer` analyzer which removes stop words
 <3> `analyzer` setting that points to the `my_analyzer` analyzer which will be used at index time
 <4> `search_analyzer` setting that points to the `my_stop_analyzer` and removes stop words for non-phrase queries
-<5> `search_quote_analyzer` setting that points to the `my_analyzer` analyzer and ensures that stop words are not removed from phrase queries  
+<5> `search_quote_analyzer` setting that points to the `my_analyzer` analyzer and ensures that stop words are not removed from phrase queries
 <6> Since the query is wrapped in quotes it is detected as a phrase query therefore the `search_quote_analyzer` kicks in and ensures the stop words
-are not removed from the query. The `my_analyzer` analyzer will then return the following tokens [`the`, `quick`, `brown`, `fox`] which will match one 
-of the documents. Meanwhile term queries will be analyzed with the `my_stop_analyzer` analyzer which will filter out stop words. So a search for either 
-`The quick brown fox` or `A quick brown fox` will return both documents since both documents contain the following tokens [`quick`, `brown`, `fox`]. 
-Without the `search_quote_analyzer` it would not be possible to do exact matches for phrase queries as the stop words from phrase queries would be 
+are not removed from the query. The `my_analyzer` analyzer will then return the following tokens [`the`, `quick`, `brown`, `fox`] which will match one
+of the documents. Meanwhile term queries will be analyzed with the `my_stop_analyzer` analyzer which will filter out stop words. So a search for either
+`The quick brown fox` or `A quick brown fox` will return both documents since both documents contain the following tokens [`quick`, `brown`, `fox`].
+Without the `search_quote_analyzer` it would not be possible to do exact matches for phrase queries as the stop words from phrase queries would be
 removed resulting in both documents matching.

+ 14 - 7
docs/reference/mapping/params/boost.asciidoc

@@ -34,24 +34,31 @@ You can achieve the same effect by using the boost parameter directly in the que
 
 [source,js]
 --------------------------------------------------
+POST _search
 {
-    "match" : {
-        "title": {
-            "query": "quick brown fox"
+    "query": {
+        "match" : {
+            "title": {
+                "query": "quick brown fox"
+            }
         }
     }
 }
 --------------------------------------------------
+// AUTOSENSE
 
 is equivalent to:
 
 [source,js]
 --------------------------------------------------
+POST _search
 {
-    "match" : {
-        "title": {
-            "query": "quick brown fox",
-            "boost": 2
+    "query": {
+        "match" : {
+            "title": {
+                "query": "quick brown fox",
+                "boost": 2
+            }
         }
     }
 }

+ 7 - 6
docs/reference/mapping/params/coerce.asciidoc

@@ -45,6 +45,7 @@ PUT my_index/my_type/2
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[catch:request]
 <1> The `number_one` field will contain the integer `10`.
 <2> This document will be rejected because coercion is disabled.
 
@@ -69,11 +70,11 @@ PUT my_index
     "my_type": {
       "properties": {
         "number_one": {
-          "type": "integer"
-        },
-        "number_two": {
           "type": "integer",
           "coerce": true
+        },
+        "number_two": {
+          "type": "integer"
         }
       }
     }
@@ -87,6 +88,6 @@ PUT my_index/my_type/2
 { "number_two": "10" } <2>
 --------------------------------------------------
 // AUTOSENSE
-<1> This document will be rejected because the `number_one` field inherits the index-level coercion setting.
-<2> The `number_two` field overrides the index level setting to enable coercion.
-
+// TEST[catch:request]
+<1> The `number_one` field overrides the index level setting to enable coercion.
+<2> This document will be rejected because the `number_two` field inherits the index-level coercion setting.

+ 3 - 3
docs/reference/mapping/params/copy-to.asciidoc

@@ -9,7 +9,7 @@ the `full_name` field as follows:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index
+PUT my_index
 {
   "mappings": {
     "my_type": {
@@ -30,13 +30,13 @@ PUT /my_index
   }
 }
 
-PUT /my_index/my_type/1
+PUT my_index/my_type/1
 {
   "first_name": "John",
   "last_name": "Smith"
 }
 
-GET /my_index/_search
+GET my_index/_search
 {
   "query": {
     "match": {

+ 8 - 13
docs/reference/mapping/params/dynamic.asciidoc

@@ -7,9 +7,7 @@ containing the new field.  For instance:
 
 [source,js]
 --------------------------------------------------
-DELETE my_index <1>
-
-PUT my_index/my_type/1 <2>
+PUT my_index/my_type/1 <1>
 {
   "username": "johnsmith",
   "name": {
@@ -18,9 +16,9 @@ PUT my_index/my_type/1 <2>
   }
 }
 
-GET my_index/_mapping <3>
+GET my_index/_mapping <2>
 
-PUT my_index/my_type/2 <4>
+PUT my_index/my_type/2 <3>
 {
   "username": "marywhite",
   "email": "mary@white.com",
@@ -31,16 +29,15 @@ PUT my_index/my_type/2 <4>
   }
 }
 
-GET my_index/_mapping <5>
+GET my_index/_mapping <4>
 --------------------------------------------------
 // AUTOSENSE
-<1> First delete the index, in case it already exists.
-<2> This document introduces the string field `username`, the object field
+<1> This document introduces the string field `username`, the object field
     `name`, and two string fields under the `name` object which can be
     referred to as `name.first` and `name.last`.
-<3> Check the mapping to verify the above.
-<4> This document adds two string fields: `email` and `name.middle`.
-<5> Check the mapping to verify the changes.
+<2> Check the mapping to verify the above.
+<3> This document adds two string fields: `email` and `name.middle`.
+<4> Check the mapping to verify the changes.
 
 The details of how new fields are detected and added to the mapping is explained in <<dynamic-mapping>>.
 
@@ -88,5 +85,3 @@ PUT my_index
 TIP: The `dynamic` setting is allowed to have different settings for fields of
 the same name in the same index.  Its value can be updated on existing fields
 using the <<indices-put-mapping,PUT mapping API>>.
-
-

+ 5 - 8
docs/reference/mapping/params/fielddata.asciidoc

@@ -100,14 +100,11 @@ PUT my_index
       "properties": {
         "tag": {
           "type": "text",
-          "fielddata": {
-            "filter": {
-              "frequency": {
-                "min": 0.001,
-                "max": 0.1,
-                "min_segment_size": 500
-              }
-            }
+          "fielddata": true,
+          "fielddata_frequency_filter": {
+            "min": 0.001,
+            "max": 0.1,
+            "min_segment_size": 500
           }
         }
       }

+ 6 - 6
docs/reference/mapping/params/ignore-malformed.asciidoc

@@ -20,11 +20,11 @@ PUT my_index
     "my_type": {
       "properties": {
         "number_one": {
-          "type": "integer"
-        },
-        "number_two": {
           "type": "integer",
           "ignore_malformed": true
+        },
+        "number_two": {
+          "type": "integer"
         }
       }
     }
@@ -44,8 +44,9 @@ PUT my_index/my_type/2
 }
 --------------------------------------------------
 // AUTOSENSE
-<1> This document will be rejected because `number_one` does not allow malformed values.
-<2> This document will have the `text` field indexed, but not the `number_two` field.
+// TEST[catch:request]
+<1> This document will have the `text` field indexed, but not the `number_one` field.
+<2> This document will be rejected because `number_two` does not allow malformed values.
 
 TIP: The `ignore_malformed` setting is allowed to have different settings for
 fields of the same name in the same index.  Its value can be updated on
@@ -84,4 +85,3 @@ PUT my_index
 
 <1> The `number_one` field inherits the index-level setting.
 <2> The `number_two` field overrides the index-level setting to turn off `ignore_malformed`.
-

+ 1 - 1
docs/reference/mapping/params/include-in-all.asciidoc

@@ -15,7 +15,7 @@ PUT my_index
       "properties": {
         "title": { <1>
           "type": "text"
-        }
+        },
         "content": { <1>
           "type": "text"
         },

+ 4 - 5
docs/reference/mapping/params/multi-fields.asciidoc

@@ -8,7 +8,7 @@ search, and as a `keyword` field for sorting or aggregations:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index
+PUT my_index
 {
   "mappings": {
     "my_type": {
@@ -26,17 +26,17 @@ PUT /my_index
   }
 }
 
-PUT /my_index/my_type/1
+PUT my_index/my_type/1
 {
   "city": "New York"
 }
 
-PUT /my_index/my_type/2
+PUT my_index/my_type/2
 {
   "city": "York"
 }
 
-GET /my_index/_search
+GET my_index/_search
 {
   "query": {
     "match": {
@@ -132,4 +132,3 @@ stemmed field allows a query for `foxes` to also match the document containing
 just `fox`.  This allows us to match as many documents as possible.  By also
 querying the unstemmed `text` field, we improve the relevance score of the
 document which matches `foxes` exactly.
-

+ 1 - 2
docs/reference/mapping/params/norms.asciidoc

@@ -31,11 +31,10 @@ PUT my_index/_mapping/my_type
 }
 ------------
 // AUTOSENSE
+// TEST[s/^/PUT my_index\n/]
 
 NOTE: Norms will not be removed instantly, but will be removed as old segments
 are merged into new segments as you continue indexing new documents. Any score
 computation on a field that has had norms removed might return inconsistent
 results since some documents won't have norms anymore while other documents
 might still have norms.
-
-

+ 5 - 5
docs/reference/mapping/params/position-increment-gap.asciidoc

@@ -13,12 +13,12 @@ For example:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index/groups/1
+PUT my_index/groups/1
 {
     "names": [ "John Abraham", "Lincoln Smith"]
 }
 
-GET /my_index/groups/_search
+GET my_index/groups/_search
 {
     "query": {
         "match_phrase": {
@@ -29,7 +29,7 @@ GET /my_index/groups/_search
     }
 }
 
-GET /my_index/groups/_search
+GET my_index/groups/_search
 {
     "query": {
         "match_phrase": {
@@ -65,12 +65,12 @@ PUT my_index
   }
 }
 
-PUT /my_index/groups/1
+PUT my_index/groups/1
 {
     "names": [ "John Abraham", "Lincoln Smith"]
 }
 
-GET /my_index/groups/_search
+GET my_index/groups/_search
 {
     "query": {
         "match_phrase": {

+ 1 - 2
docs/reference/mapping/params/properties.asciidoc

@@ -99,7 +99,6 @@ GET my_index/_search
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 IMPORTANT: The full path to the inner field must be specified.
-
-

+ 1 - 3
docs/reference/mapping/params/search-analyzer.asciidoc

@@ -14,7 +14,7 @@ this can be overridden with the `search_analyzer` setting:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -81,5 +81,3 @@ type] for a full explanation of this example.
 TIP: The `search_analyzer` setting must have the same setting for fields of
 the same name in the same index.  Its value can be updated on existing fields
 using the <<indices-put-mapping,PUT mapping API>>.
-
-

+ 2 - 3
docs/reference/mapping/params/store.asciidoc

@@ -18,7 +18,7 @@ to extract those fields from a large `_source` field:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index
+PUT my_index
 {
   "mappings": {
     "my_type": {
@@ -39,7 +39,7 @@ PUT /my_index
   }
 }
 
-PUT /my_index/my_type/1
+PUT my_index/my_type/1
 {
   "title":   "Some short title",
   "date":    "2015-01-01",
@@ -70,4 +70,3 @@ field instead.
 
 Another situation where it can make sense to make a field stored is for those
 that don't appear in the `_source` field (such as <<copy-to,`copy_to` fields>>).
-

+ 1 - 2
docs/reference/mapping/types/date.asciidoc

@@ -40,7 +40,7 @@ PUT my_index/my_type/1
 { "date": "2015-01-01" } <2>
 
 PUT my_index/my_type/2
-{ "date": "2015-01-01T12:10:30Z" } <3>
+{ "date": "2015-01-01T12:10:30Z" } <3>
 
 PUT my_index/my_type/3
 { "date": 1420070400001 } <4>
@@ -134,4 +134,3 @@ The following parameters are accepted by `date` fields:
     Whether the field value should be stored and retrievable separately from
     the <<mapping-source-field,`_source`>> field. Accepts `true` or `false`
     (default).
-

+ 2 - 1
docs/reference/mapping/types/nested.asciidoc

@@ -62,6 +62,7 @@ GET my_index/_search
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 ==== Using `nested` fields for arrays of objects
 
@@ -137,7 +138,7 @@ GET my_index/_search
             "user.first": {}
           }
         }
-
+      }
     }
   }
 }

+ 1 - 2
docs/reference/migration/migrate_5_0/mapping.asciidoc

@@ -53,7 +53,7 @@ you could map it both as a number and a `keyword` using <<multi-fields>>:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index
+PUT my_index
 {
   "mappings": {
     "my_type": {
@@ -149,4 +149,3 @@ Per-field boosts on the `_all` are now compressed into a single byte instead
 of the 4 bytes used previously. While this will make the index much more
 space-efficient, it also means that index time boosts will be less accurately
 encoded.
-

+ 2 - 2
docs/reference/modules/cluster/allocation_filtering.asciidoc

@@ -14,7 +14,7 @@ For instance, we could decommission a node using its IP address as follows:
 
 [source,js]
 --------------------------------------------------
-PUT /_cluster/settings
+PUT _cluster/settings
 {
   "transient" : {
     "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
@@ -67,4 +67,4 @@ PUT _cluster/settings
 }
 ------------------------
 // AUTOSENSE
-
+// TEST[skip:indexes don't assign]

+ 1 - 2
docs/reference/modules/cluster/disk_allocator.asciidoc

@@ -56,7 +56,7 @@ the cluster every minute:
 
 [source,js]
 --------------------------------------------------
-PUT /_cluster/settings
+PUT _cluster/settings
 {
   "transient": {
     "cluster.routing.allocation.disk.watermark.low": "80%",
@@ -73,4 +73,3 @@ data paths, one with 50b out of 100b free (50% used) and another with
 40b out of 50b free (80% used) it would see the node's disk usage as 90b
 out of 150b). In 2.0.0, the minimum and maximum disk usages are tracked
 separately.
-

+ 1 - 1
docs/reference/modules/node.asciidoc

@@ -158,6 +158,7 @@ PUT _cluster/settings
 }
 ----------------------------
 // AUTOSENSE
+// TEST[catch:/cannot set discovery.zen.minimum_master_nodes to more than the current master nodes/]
 
 TIP: An advantage of splitting the master and data roles between dedicated
 nodes is that you can have just three master-eligible nodes and set
@@ -299,4 +300,3 @@ same data directory. This can lead to unexpected data loss.
 More node settings can be found in <<modules,Modules>>.  Of particular note are
 the <<cluster.name,`cluster.name`>>, the <<node.name,`node.name`>> and the
 <<modules-network,network settings>>.
-

+ 18 - 19
docs/reference/modules/scripting/painless.asciidoc

@@ -40,11 +40,9 @@ to `painless`.
 
 To illustrate how Painless works, let's load some hockey stats into an Elasticsearch index:
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-DELETE /hockey-stats
-
-PUT /hockey-stats/player/_bulk
+PUT hockey/player/_bulk?refresh
 {"index":{"_id":1}}
 {"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]}
 {"index":{"_id":2}}
@@ -69,6 +67,7 @@ PUT /hockey-stats/player/_bulk
 {"first":"joe","last":"colborne","goals":[3,18,13],"assists":[6,20,24],"gp":[26,67,82]}
 ----------------------------------------------------------------
 // AUTOSENSE
+// TESTSETUP
 
 [float]
 === Accessing Doc Values from Painless
@@ -77,9 +76,9 @@ All Painless scripts take in a `Map<String,def>` of values called `input`.  Docu
 
 For example, the following script calculates a player's total goals. This example uses a strongly typed `int` and a `for` loop.
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-GET /hockey-stats/_search
+GET hockey/_search
 {
   "query": {
     "function_score": {
@@ -97,9 +96,9 @@ GET /hockey-stats/_search
 
 Alternatively, you could do the same thing using a script field instead of a function score:
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-GET /hockey-stats/_search
+GET hockey/_search
 {
   "query": {
     "match_all": {}
@@ -120,16 +119,16 @@ You must always specify the index of the field value you want, even if there's o
 All fields in Elasticsearch are multi-valued and Painless does not provide a `.value` shortcut. The following example uses a Painless script to sort the players by their combined first and last names. The names are accessed using
 `input.doc['first'].0` and `input.doc['last'].0`.
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-GET /hockey-stats/_search
+GET hockey/_search
 {
   "query": {
     "match_all": {}
   },
   "sort": {
     "_script": {
-      "type": "keyword",
+      "type": "string",
       "order": "asc",
       "script": {
         "lang": "painless",
@@ -148,9 +147,9 @@ You can also easily update fields. You access the original source for a field as
 
 First, let's look at the source data for a player by submitting the following request:
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-GET /hockey-stats/_search
+GET hockey/_search
 {
   "fields": [
     "_id",
@@ -167,9 +166,9 @@ GET /hockey-stats/_search
 
 To change player 1's last name to `hockey`, simply set `input.ctx._source.last` to the new value:
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-POST /hockey-stats/player/1/_update
+POST hockey/player/1/_update
 {
   "script": {
     "lang": "painless",
@@ -185,9 +184,9 @@ POST /hockey-stats/player/1/_update
 You can also add fields to a document. For example, this script adds a new field that contains
 the player's nickname,  _hockey_.
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-POST /hockey-stats/player/1/_update
+POST hockey/player/1/_update
 {
   "script": {
     "lang": "painless",
@@ -208,9 +207,9 @@ If you explicitly specify types, the compiler doesn't have to perform type looku
 improve performance. For example, the following script performs the same first name, last name sort we showed before,
 but it's fully type-safe.
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------
-GET /hockey-stats/_search
+GET hockey/_search
 {
   "query": {
     "match_all": {}

+ 14 - 13
docs/reference/modules/scripting/using.asciidoc

@@ -186,35 +186,27 @@ state:
 
 [source,js]
 -----------------------------------
-POST /_scripts/groovy/calculate-score
+POST _scripts/groovy/calculate-score
 {
   "script": "log(_score * 2) + my_modifier"
 }
 -----------------------------------
 // AUTOSENSE
 
-
 This same script can be retrieved with:
 
 [source,js]
 -----------------------------------
-GET /_scripts/groovy/calculate-score
------------------------------------
-// AUTOSENSE
-
-or deleted with:
-
-[source,js]
------------------------------------
-DELETE /_scripts/groovy/calculate-score
+GET _scripts/groovy/calculate-score
 -----------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 Stored scripts can be used by specifying the `lang` and `id` parameters as follows:
 
 [source,js]
 --------------------------------------------------
-GET my_index/_search
+GET _search
 {
   "query": {
     "script": {
@@ -229,10 +221,19 @@ GET my_index/_search
   }
 }
 --------------------------------------------------
+// AUTOSENSE
+// TEST[continued]
 
+And the script can be deleted with:
+
+[source,js]
+-----------------------------------
+DELETE _scripts/groovy/calculate-score
+-----------------------------------
+// AUTOSENSE
+// TEST[continued]
 
 NOTE: The size of stored scripts is limited to 65,535 bytes. This can be
 changed by setting `script.max_size_in_bytes` setting to increase that soft
 limit, but if scripts are really large then alternatives like
 <<modules-scripting-native,native>> scripts should be considered instead.
-

+ 30 - 30
docs/reference/query-dsl/bool-query.asciidoc

@@ -39,47 +39,47 @@ on the fraction of all query terms that a document contains. See Lucene
 `BooleanQuery` for more details.
 
 The `bool` query takes a _more-matches-is-better_ approach, so the score from
-each matching `must` or `should` clause will be added together to provide the 
+each matching `must` or `should` clause will be added together to provide the
 final `_score` for each document.
 
 [source,js]
 --------------------------------------------------
+POST _search
 {
+  "query": {
     "bool" : {
-        "must" : {
-            "term" : { "user" : "kimchy" }
-        },
-        "filter": {
-            "term" : { "tag" : "tech" }
-        },
-        "must_not" : {
-            "range" : {
-                "age" : { "from" : 10, "to" : 20 }
-            }
-        },
-        "should" : [
-            {
-                "term" : { "tag" : "wow" }
-            },
-            {
-                "term" : { "tag" : "elasticsearch" }
-            }
-        ],
-        "minimum_should_match" : 1,
-        "boost" : 1.0
+      "must" : {
+        "term" : { "user" : "kimchy" }
+      },
+      "filter": {
+        "term" : { "tag" : "tech" }
+      },
+      "must_not" : {
+        "range" : {
+          "age" : { "from" : 10, "to" : 20 }
+        }
+      },
+      "should" : [
+        { "term" : { "tag" : "wow" } },
+        { "term" : { "tag" : "elasticsearch" } }
+      ],
+      "minimum_should_match" : 1,
+      "boost" : 1.0
     }
+  }
 }
 --------------------------------------------------
+// AUTOSENSE
 
-==== Scoring with `bool.filter` 
+==== Scoring with `bool.filter`
 
 Queries specified under the `filter` element have no effect on scoring --
 scores are returned as `0`.  Scores are only affected by the query that has
-been specified.  For instance, all three of the following queries return 
-all documents where the `status` field contains the term `active`. 
+been specified.  For instance, all three of the following queries return
+all documents where the `status` field contains the term `active`.
 
 This first query assigns a score of `0` to all documents, as no scoring
-query has been specified: 
+query has been specified:
 
 [source,json]
 ---------------------------------
@@ -98,7 +98,7 @@ GET _search
 ---------------------------------
 // AUTOSENSE
 
-This `bool` query has a `match_all` query, which assigns a score of `1.0` to 
+This `bool` query has a `match_all` query, which assigns a score of `1.0` to
 all documents.
 
 [source,json]
@@ -107,7 +107,7 @@ GET _search
 {
   "query": {
     "bool": {
-      "query": {
+      "must": {
         "match_all": {}
       },
       "filter": {
@@ -121,9 +121,9 @@ GET _search
 ---------------------------------
 // AUTOSENSE
 
-This `constant_score` query behaves in exactly the same way as the second example above.  
+This `constant_score` query behaves in exactly the same way as the second example above.
 The `constant_score` query assigns a score of `1.0` to all documents matched
-by the filter. 
+by the filter.
 
 [source,json]
 ---------------------------------

+ 1 - 2
docs/reference/query-dsl/function-score-query.asciidoc

@@ -459,7 +459,7 @@ the request would look like this:
 
 [source,js]
 --------------------------------------------------
-GET /hotels/_search/
+GET _search
 {
   "query": {
     "function_score": {
@@ -546,4 +546,3 @@ Only numeric, date, and geo-point fields are supported.
 
 If the numeric field is missing in the document, the function will
 return 1.
-

+ 7 - 4
docs/reference/query-dsl/term-query.asciidoc

@@ -6,10 +6,14 @@ in the inverted index.  For instance:
 
 [source,js]
 --------------------------------------------------
+POST _search
 {
+  "query": {
     "term" : { "user" : "Kimchy" } <1>
+  }
 }
 --------------------------------------------------
+// AUTOSENSE
 <1> Finds documents which contain the exact term `Kimchy` in the inverted index
     of the `user` field.
 
@@ -18,7 +22,7 @@ relevance score than another query, for instance:
 
 [source,js]
 --------------------------------------------------
-GET /_search
+GET _search
 {
   "query": {
     "bool": {
@@ -41,6 +45,7 @@ GET /_search
   }
 }
 --------------------------------------------------
+// AUTOSENSE
 
 <1> The `urgent` query clause has a boost of `2.0`, meaning it is twice as important
     as the query clause for `normal`.
@@ -113,7 +118,6 @@ Now, compare the results for the `term` query and the `match` query:
 
 [source,js]
 --------------------------------------------------
-
 GET my_index/my_type/_search
 {
   "query": {
@@ -151,6 +155,7 @@ GET my_index/my_type/_search
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[continued]
 
 <1> This query matches because the `exact_value` field contains the exact
     term `Quick Foxes!`.
@@ -161,5 +166,3 @@ GET my_index/my_type/_search
 <4> This `match` query on the `full_text` field first analyzes the query string,
     then looks for documents containing `quick` or `foxes` or both.
 **************************************************
-
-

+ 4 - 4
docs/reference/setup/cluster_restart.asciidoc

@@ -17,7 +17,7 @@ before shutting down a node:
 
 [source,js]
 --------------------------------------------------
-PUT /_cluster/settings
+PUT _cluster/settings
 {
   "persistent": {
     "cluster.routing.allocation.enable": "none"
@@ -25,6 +25,7 @@ PUT /_cluster/settings
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[skip:indexes don't assign]
 
 ==== Step 2: Perform a synced flush
 
@@ -33,7 +34,7 @@ Shard recovery will be much faster if you stop indexing and issue a
 
 [source,sh]
 --------------------------------------------------
-POST /_flush/synced
+POST _flush/synced
 --------------------------------------------------
 // AUTOSENSE
 
@@ -96,7 +97,7 @@ reenable shard allocation:
 
 [source,js]
 ------------------------------------------------------
-PUT /_cluster/settings
+PUT _cluster/settings
 {
   "persistent": {
     "cluster.routing.allocation.enable": "all"
@@ -123,4 +124,3 @@ GET _cat/recovery
 
 Once the `status` column in the `_cat/health` output has reached `green`, all
 primary and replica shards have been successfully allocated.
-

+ 4 - 4
docs/reference/setup/rolling_upgrade.asciidoc

@@ -21,7 +21,7 @@ allocation before shutting down a node:
 
 [source,js]
 --------------------------------------------------
-PUT /_cluster/settings
+PUT _cluster/settings
 {
   "transient": {
     "cluster.routing.allocation.enable": "none"
@@ -29,6 +29,7 @@ PUT /_cluster/settings
 }
 --------------------------------------------------
 // AUTOSENSE
+// TEST[skip:indexes don't assign]
 
 ==== Step 2: Stop non-essential indexing and perform a synced flush (Optional)
 
@@ -38,7 +39,7 @@ will be much faster if you temporarily stop non-essential indexing and issue a
 
 [source,js]
 --------------------------------------------------
-POST /_flush/synced
+POST _flush/synced
 --------------------------------------------------
 // AUTOSENSE
 
@@ -111,7 +112,7 @@ the node:
 
 [source,js]
 --------------------------------------------------
-PUT /_cluster/settings
+PUT _cluster/settings
 {
   "transient": {
     "cluster.routing.allocation.enable": "all"
@@ -172,4 +173,3 @@ recovery has completed.
 
 When the cluster is stable and the node has recovered, repeat the above steps
 for all remaining nodes.
-

+ 53 - 0
docs/src/test/java/org/elasticsearch/smoketest/SmokeTestDocsIT.java

@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.smoketest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.test.rest.RestTestCandidate;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+import java.util.List;
+
+public class SmokeTestDocsIT extends ESRestTestCase {
+
+    public SmokeTestDocsIT(@Name("yaml") RestTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+        return ESRestTestCase.createParameters(0, 1);
+    }
+
+    @Override
+    protected void afterIfFailed(List<Throwable> errors) {
+        super.afterIfFailed(errors);
+        String name = getTestName().split("=")[1];
+        name = name.substring(0, name.length() - 1);
+        name = name.replaceAll("/([^/]+)$", ".asciidoc:$1");
+        logger.error("This failing test was generated by documentation starting at {}. It may include many snippets. "
+                + "See docs/README.asciidoc for an explanation of test generation.", name);
+    }
+}
+
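
To make the failure message concrete, here is a rough, self-contained sketch of the name munging in `afterIfFailed` applied to a hypothetical generated test name (the input string is made up for illustration):

[source,java]
----------------------------------------------------------------
// Illustrative only: reproduce the test-name munging above on a made-up name.
public class NameMungingSketch {
    public static void main(String[] args) {
        String testName = "test {yaml=docs/reference/docs/update-by-query/line_123}";
        String name = testName.split("=")[1];                 // "docs/.../update-by-query/line_123}"
        name = name.substring(0, name.length() - 1);          // strip the trailing '}'
        name = name.replaceAll("/([^/]+)$", ".asciidoc:$1");  // point back at the source file
        System.out.println(name);
        // docs/reference/docs/update-by-query.asciidoc:line_123
    }
}
----------------------------------------------------------------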

+ 1 - 0
settings.gradle

@@ -3,6 +3,7 @@ rootProject.name = 'elasticsearch'
 List projects = [
   'rest-api-spec',
   'core',
+  'docs',
   'distribution:integ-test-zip',
   'distribution:zip',
   'distribution:tar',

+ 1 - 1
test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java

@@ -78,7 +78,7 @@ public class RestTestExecutionContext implements Closeable {
         try {
             response = callApiInternal(apiName, requestParams, body, headers);
             //we always stash the last response body
-            stash.stashValue("body", response.getBody());
+            stash.stashResponse(response);
             return response;
         } catch(RestException e) {
             response = e.restResponse();

+ 19 - 8
test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java

@@ -19,16 +19,17 @@
 
 package org.elasticsearch.test.rest;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import org.elasticsearch.test.rest.client.RestResponse;
 
 /**
  * Allows to cache the last obtained test response and or part of it within variables
@@ -41,6 +42,7 @@ public class Stash implements ToXContent {
     public static final Stash EMPTY = new Stash();
 
     private final Map<String, Object> stash = new HashMap<>();
+    private RestResponse response;
 
     /**
      * Allows to saved a specific field in the stash as key-value pair
@@ -53,6 +55,12 @@ public class Stash implements ToXContent {
         }
     }
 
+    public void stashResponse(RestResponse response) throws IOException {
+        // TODO we can almost certainly save time by lazily evaluating the body
+        stashValue("body", response.getBody());
+        this.response = response;
+    }
+
     /**
      * Clears the previously stashed values
      */
@@ -78,7 +86,10 @@ public class Stash implements ToXContent {
      * The stash contains fields eventually extracted from previous responses that can be reused
      * as arguments for following requests (e.g. scroll_id)
      */
-    public Object unstashValue(String value) {
+    public Object unstashValue(String value) throws IOException {
+        if (value.startsWith("$body.")) {
+            return response.evaluate(value.substring("$body".length()), this);
+        }
         Object stashedValue = stash.get(value.substring(1));
         if (stashedValue == null) {
             throw new IllegalArgumentException("stashed value not found for key [" + value + "]");
@@ -89,14 +100,14 @@ public class Stash implements ToXContent {
     /**
      * Recursively unstashes map values if needed
      */
-    public Map<String, Object> unstashMap(Map<String, Object> map) {
+    public Map<String, Object> unstashMap(Map<String, Object> map) throws IOException {
         Map<String, Object> copy = new HashMap<>(map);
         unstashObject(copy);
         return copy;
     }
 
     @SuppressWarnings("unchecked")
-    private void unstashObject(Object obj) {
+    private void unstashObject(Object obj) throws IOException {
         if (obj instanceof List) {
             List list = (List) obj;
             for (int i = 0; i < list.size(); i++) {
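
The effect of the new "$body." handling: a reference that starts with "$body." is evaluated against the whole last response instead of being looked up in the stash map. A minimal usage sketch, with invented values and an assumed "lastResponse" variable that is not part of the commit:

    import java.io.IOException;

    import org.elasticsearch.test.rest.Stash;
    import org.elasticsearch.test.rest.client.RestResponse;

    class StashBodySketch {
        Object lookUpId(RestResponse lastResponse) throws IOException {
            Stash stash = new Stash();
            stash.stashResponse(lastResponse);    // stashes the body under "body" and keeps the response for later evaluation
            // "$body.<path>" now resolves against the stashed response, so a later request
            // or assertion can refer to e.g. the id of the document that was just indexed:
            return stash.unstashValue("$body._id");
        }
    }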

+ 16 - 0
test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java

@@ -19,6 +19,7 @@
 package org.elasticsearch.test.rest.client;
 
 import com.carrotsearch.randomizedtesting.RandomizedTest;
+
 import org.apache.http.config.Registry;
 import org.apache.http.config.RegistryBuilder;
 import org.apache.http.conn.socket.ConnectionSocketFactory;
@@ -61,6 +62,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import static java.util.Objects.requireNonNull;
+
 /**
  * REST client used to test the elasticsearch REST layer
  * Holds the {@link RestSpec} used to translate api calls into REST calls
@@ -186,6 +189,19 @@ public class RestClient implements Closeable {
     }
 
     private HttpRequestBuilder callApiBuilder(String apiName, Map<String, String> params, String body) {
+        if ("raw".equals(apiName)) {
+            // Raw requests are a bit simpler....
+            HttpRequestBuilder httpRequestBuilder = httpRequestBuilder();
+            httpRequestBuilder.method(requireNonNull(params.remove("method"), "Method must be set to use raw request"));
+            httpRequestBuilder.path("/"+ requireNonNull(params.remove("path"), "Path must be set to use raw request"));
+            httpRequestBuilder.body(body);
+
+            // And everything else is a url parameter!
+            for (Map.Entry<String, String> entry : params.entrySet()) {
+                httpRequestBuilder.addParam(entry.getKey(), entry.getValue());
+            }
+            return httpRequestBuilder;
+        }
 
         //create doesn't exist in the spec but is supported in the clients (index with op_type=create)
         boolean indexCreateApi = "create".equals(apiName);
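
A rough illustration of what the new "raw" api consumes, with made-up values: "method" and "path" are pulled out of the parameters, and whatever remains becomes a URL parameter.

    import java.util.HashMap;
    import java.util.Map;

    class RawCallSketch {
        // Hypothetical parameters; the raw branch of callApiBuilder would turn them into
        // POST /twitter/tweet/1?refresh=true with the supplied body sent unchanged.
        Map<String, String> rawParams() {
            Map<String, String> params = new HashMap<>();
            params.put("method", "POST");          // consumed: the HTTP method
            params.put("path", "twitter/tweet/1"); // consumed: the request path
            params.put("refresh", "true");         // left over, so it becomes ?refresh=true
            return params;
        }
    }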

+ 3 - 3
test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java

@@ -49,14 +49,14 @@ public class JsonPath {
     /**
      * Returns the object corresponding to the provided path if present, null otherwise
      */
-    public Object evaluate(String path) {
+    public Object evaluate(String path) throws IOException {
         return evaluate(path, Stash.EMPTY);
     }
 
     /**
      * Returns the object corresponding to the provided path if present, null otherwise
      */
-    public Object evaluate(String path, Stash stash) {
+    public Object evaluate(String path, Stash stash) throws IOException {
         String[] parts = parsePath(path);
         Object object = jsonMap;
         for (String part : parts) {
@@ -69,7 +69,7 @@ public class JsonPath {
     }
 
     @SuppressWarnings("unchecked")
-    private Object evaluate(String key, Object object, Stash stash) {
+    private Object evaluate(String key, Object object, Stash stash) throws IOException {
         if (stash.isStashedValue(key)) {
             key = stash.unstashValue(key).toString();
         }

+ 16 - 12
test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java

@@ -33,19 +33,23 @@ public class RestTestSectionParser implements RestTestFragmentParser<TestSection
         XContentParser parser = parseContext.parser();
         parseContext.advanceToFieldName();
         TestSection testSection = new TestSection(parser.currentName());
-        parser.nextToken();
-        testSection.setSkipSection(parseContext.parseSkipSection());
-
-        while ( parser.currentToken() != XContentParser.Token.END_ARRAY) {
-            parseContext.advanceToFieldName();
-            testSection.addExecutableSection(parseContext.parseExecutableSection());
+        try {
+            parser.nextToken();
+            testSection.setSkipSection(parseContext.parseSkipSection());
+    
+            while ( parser.currentToken() != XContentParser.Token.END_ARRAY) {
+                parseContext.advanceToFieldName();
+                testSection.addExecutableSection(parseContext.parseExecutableSection());
+            }
+    
+            parser.nextToken();
+            assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+            parser.nextToken();
+    
+            return testSection;
+        } catch (Exception e) {
+            throw new RestTestParseException("Error parsing test named [" + testSection.getName() + "]", e);
         }
-
-        parser.nextToken();
-        assert parser.currentToken() == XContentParser.Token.END_OBJECT;
-        parser.nextToken();
-
-        return testSection;
     }
 
 }

+ 15 - 7
test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java

@@ -18,18 +18,20 @@
  */
 package org.elasticsearch.test.rest.parser;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentLocation;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.test.rest.section.DoSection;
 import org.elasticsearch.test.rest.section.ExecutableSection;
+import org.elasticsearch.test.rest.section.ResponseBodyAssertion;
 import org.elasticsearch.test.rest.section.SetupSection;
 import org.elasticsearch.test.rest.section.SkipSection;
 import org.elasticsearch.test.rest.section.TestSection;
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
 /**
  * Context shared across the whole tests parse phase.
  * Provides shared parse methods and holds information needed to parse the test sections (e.g. es version)
@@ -52,6 +54,7 @@ public class RestTestSuiteParseContext {
         EXECUTABLE_SECTIONS_PARSERS.put("lt", new LessThanParser());
         EXECUTABLE_SECTIONS_PARSERS.put("lte", new LessThanOrEqualToParser());
         EXECUTABLE_SECTIONS_PARSERS.put("length", new LengthParser());
+        EXECUTABLE_SECTIONS_PARSERS.put("response_body", ResponseBodyAssertion.PARSER);
     }
 
     private final String api;
@@ -114,9 +117,14 @@ public class RestTestSuiteParseContext {
         if (execSectionParser == null) {
             throw new RestTestParseException("no parser found for executable section [" + section + "]");
         }
-        ExecutableSection executableSection = execSectionParser.parse(this);
-        parser.nextToken();
-        return executableSection;
+        XContentLocation location = parser.getTokenLocation();
+        try {
+            ExecutableSection executableSection = execSectionParser.parse(this);
+            parser.nextToken();
+            return executableSection;
+        } catch (Exception e) {
+            throw new IOException("Error parsing section starting at ["+ location + "]", e);
+        }
     }
 
     public DoSection parseDoSection() throws IOException, RestTestParseException {
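
The new "response_body" key gives the generated tests a way to assert on the entire body of the last response. A sketch of the parsing step it registers, with an invented snippet body:

    import java.util.Map;

    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;

    class ResponseBodyParseSketch {
        Map<String, Object> expectedBody() throws Exception {
            String snippet = "{\"_index\": \"twitter\", \"created\": true}";  // hypothetical snippet content
            try (XContentParser parser = JsonXContent.jsonXContent.createParser(snippet)) {
                return parser.map();  // this map becomes the value asserted against "$body"
            }
        }
    }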

+ 167 - 0
test/framework/src/main/java/org/elasticsearch/test/rest/section/ResponseBodyAssertion.java

@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.parser.RestTestFragmentParser;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+
+/**
+ * Checks that the response body matches an expected value.
+ */
+public class ResponseBodyAssertion extends Assertion {
+    public static final RestTestFragmentParser<ResponseBodyAssertion> PARSER = new RestTestFragmentParser<ResponseBodyAssertion>() {
+        @Override
+        public ResponseBodyAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+            try (XContentParser parser = JsonXContent.jsonXContent.createParser(parseContext.parseField())) {
+                return new ResponseBodyAssertion("$body", parser.map());
+            }
+        }
+    };
+
+    private ResponseBodyAssertion(String field, Map<String, Object> expectedValue) {
+        super(field, expectedValue);
+    }
+
+    @Override
+    protected void doAssert(Object actualValue, Object expectedValue) {
+        if (false == expectedValue.equals(actualValue)) {
+            @SuppressWarnings("unchecked")
+            Map<String, Object> actual = (Map<String, Object>) actualValue;
+            @SuppressWarnings("unchecked")
+            Map<String, Object> expected = (Map<String, Object>) expectedValue;
+            FailureMessage message = new FailureMessage();
+            message.compareMaps(actual, expected);
+            throw new AssertionError(message.message);
+        }
+    }
+
+    private class FailureMessage {
+        private final StringBuilder message = new StringBuilder("body didn't match the expected value:\n");
+        private int indent = 0;
+
+        private void compareMaps(Map<String, Object> actual, Map<String, Object> expected) {
+            actual = new TreeMap<>(actual);
+            expected = new TreeMap<>(expected);
+            for (Map.Entry<String, Object> expectedEntry : expected.entrySet()) {
+                compare(expectedEntry.getKey(), expectedEntry.getValue(), actual.remove(expectedEntry.getKey()));
+            }
+            for (Map.Entry<String, Object> unmatchedEntry : actual.entrySet()) {
+                field(unmatchedEntry.getKey(), "unexpected but found [" + unmatchedEntry.getValue() + "]");
+            }
+        }
+
+        private void compareLists(List<Object> actual, List<Object> expected) {
+            int i = 0;
+            while (i < actual.size() && i < expected.size()) {
+                compare(i, actual.get(i), expected.get(i));
+                i++;
+            }
+            if (actual.size() == expected.size()) {
+                return;
+            }
+            indent();
+            if (actual.size() < expected.size()) {
+                message.append("expected [").append(expected.size() - i).append("] more entries\n");
+                return;
+            }
+            message.append("received [").append(actual.size() - i).append("] more entries than expected\n");
+        }
+
+        private void compare(Object field, Object expected, @Nullable Object actual) {
+            if (expected instanceof Map) {
+                if (actual == null) {
+                    field(field, "expected map but not found");
+                    return;
+                }
+                if (false == actual instanceof Map) {
+                    field(field, "expected map but found [" + actual + "]");
+                    return;
+                }
+                @SuppressWarnings("unchecked")
+                Map<String, Object> expectedMap = (Map<String, Object>) expected;
+                @SuppressWarnings("unchecked")
+                Map<String, Object> actualMap = (Map<String, Object>) actual;
+                if (expectedMap.isEmpty() && actualMap.isEmpty()) {
+                    field(field, "same [empty map]");
+                }
+                field(field, null);
+                indent += 1;
+                compareMaps(expectedMap, actualMap);
+                indent -= 1;
+                return;
+            }
+            if (expected instanceof List) {
+                if (actual == null) {
+                    field(field, "expected list but not found");
+                    return;
+                }
+                if (false == actual instanceof List) {
+                    field(field, "expected list but found [" + actual + "]");
+                    return;
+                }
+                @SuppressWarnings("unchecked")
+                List<Object> expectedList = (List<Object>) expected;
+                @SuppressWarnings("unchecked")
+                List<Object> actualList = (List<Object>) actual;
+                if (expectedList.isEmpty() && actualList.isEmpty()) {
+                    field(field, "same [empty list]");
+                    return;
+                }
+                field(field, null);
+                indent += 1;
+                compareLists(expectedList, actualList);
+                indent -= 1;
+                return;
+            }
+            if (actual == null) {
+                field(field, "expected [" + expected + "] but not found");
+                return;
+            }
+            if (expected.equals(actual)) {
+                field(field, "same [" + expected + "]");
+                return;
+            }
+            field(field, "expected [" + expected + "] but was [" + actual + "]");
+        }
+
+        private void indent() {
+            for (int i = 0; i < indent; i++) {
+                message.append("  ");
+            }
+        }
+
+        private void field(Object name, String info) {
+            indent();
+            message.append(String.format(Locale.ROOT, "%30s: ", name));
+            if (info != null) {
+                message.append(info);
+            }
+            message.append('\n');
+        }
+    }
+}
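
To make the mismatch report concrete, a hypothetical example with invented values: each expected field is compared, leftover fields in the actual body are called out, and every field name is right-aligned in a 30 character column by field().

    import java.util.HashMap;
    import java.util.Map;

    class FailureMessageSketch {
        public static void main(String[] args) {
            Map<String, Object> expected = new HashMap<>();
            expected.put("_index", "twitter");
            expected.put("created", true);

            Map<String, Object> actual = new HashMap<>();
            actual.put("_index", "tweets");
            actual.put("created", true);
            actual.put("_version", 1);

            // Comparing these the way doAssert does would produce, roughly:
            //   _index: expected [twitter] but was [tweets]
            //   created: same [true]
            //   _version: unexpected but found [1]
        }
    }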