Browse Source

[ML][DOCS] Add zero shot example and setting truncation at inference (#81003)

More examples for the _infer endpoint
David Kyle 3 years ago
parent
commit
aba14aacfa

+ 110 - 1
docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc

@@ -90,7 +90,7 @@ text output and the recognized entities.
 --------------------------------------------------
 POST _ml/trained_models/model2/deployment/_infer
 {
-	"input": "Hi my name is Josh and I live in Berlin"
+	"docs": [{"text_field": "Hi my name is Josh and I live in Berlin"}]
 }
 --------------------------------------------------
 // TEST[skip:TBD]
@@ -120,3 +120,112 @@ The API returns in this case:
 }
 ----
 // NOTCONSOLE
+
+Zero-shot classification tasks require extra configuration defining the class labels.
+These labels are passed in the zero-shot classification object of the inference config.
+
+[source,console]
+--------------------------------------------------
+POST _ml/trained_models/model2/deployment/_infer
+{
+  "docs": [
+    {
+      "text_field": "This is a very happy person"
+    }
+  ],
+  "inference_config": {
+    "zero_shot_classification": {
+      "labels": [
+        "glad",
+        "sad",
+        "bad",
+        "rad"
+      ],
+      "multi_label": false
+    }
+  }
+}
+--------------------------------------------------
+// TEST[skip:TBD]
+
+The API returns the predicted label with its prediction probability, as well as the top classes:
+
+[source,console-result]
+----
+{
+  "predicted_value" : "glad",
+  "top_classes" : [
+    {
+      "class_name" : "glad",
+      "class_probability" : 0.8061155063386439,
+      "class_score" : 0.8061155063386439
+    },
+    {
+      "class_name" : "rad",
+      "class_probability" : 0.18218006158387956,
+      "class_score" : 0.18218006158387956
+    },
+    {
+      "class_name" : "bad",
+      "class_probability" : 0.006325615787634201,
+      "class_score" : 0.006325615787634201
+    },
+    {
+      "class_name" : "sad",
+      "class_probability" : 0.0053788162898424545,
+      "class_score" : 0.0053788162898424545
+    }
+  ],
+  "prediction_probability" : 0.8061155063386439
+}
+----
+// NOTCONSOLE
+
+
+The tokenization truncate option can be overridden when calling the API:
+
+[source,console]
+--------------------------------------------------
+POST _ml/trained_models/model2/deployment/_infer
+{
+  "docs": [{"text_field": "The Amazon rainforest covers most of the Amazon basin in South America"}],
+  "inference_config": {
+    "ner": {
+      "tokenization": {
+        "bert": {
+          "truncate": "first"
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// TEST[skip:TBD]
+
+When the input has been truncated due to the limit imposed by the model's `max_sequence_length`,
+the `is_truncated` field appears in the response.
+
+[source,console-result]
+----
+{
+  "predicted_value" : "The [Amazon](LOC&Amazon) rainforest covers most of the [Amazon](LOC&Amazon) basin in [South America](LOC&South+America)",
+  "entities" : [
+    {
+      "entity" : "Amazon",
+      "class_name" : "LOC",
+      "class_probability" : 0.9505460915724254,
+      "start_pos" : 4,
+      "end_pos" : 10
+    },
+    {
+      "entity" : "Amazon",
+      "class_name" : "LOC",
+      "class_probability" : 0.9969992804311777,
+      "start_pos" : 41,
+      "end_pos" : 47
+    }
+  ],
+  "is_truncated" : true
+}
+----
+// NOTCONSOLE