[role="xpack"]
[[infer-trained-model]]
= Infer trained model API

[subs="attributes"]
++++
<titleabbrev>Infer trained model</titleabbrev>
++++

Evaluates a trained model. The model may be any supervised model either trained
by {dfanalytics} or imported.

preview::[]

[[infer-trained-model-request]]
== {api-request-title}

`POST _ml/trained_models/<model_id>/_infer`

////
[[infer-trained-model-prereq]]
== {api-prereq-title}
////

////
[[infer-trained-model-desc]]
== {api-description-title}
////

[[infer-trained-model-path-params]]
== {api-path-parms-title}

`<model_id>`::
(Required, string)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id]

[[infer-trained-model-query-params]]
== {api-query-parms-title}

`timeout`::
(Optional, time)
Controls the amount of time to wait for {infer} results. Defaults to 10 seconds.

[[infer-trained-model-request-body]]
== {api-request-body-title}

`docs`::
(Required, array)
An array of objects to pass to the model for inference. The objects should
contain the fields matching your configured trained model input. Typically for
NLP models, the field name is `text_field`. Currently for NLP models, only a
single value is allowed. For {dfanalytics} or imported classification or
regression models, more than one value is allowed.

//Begin inference_config
`inference_config`::
(Required, object)
The default configuration for inference. This can be: `regression`,
`classification`, `fill_mask`, `ner`, `question_answering`,
`text_classification`, `text_embedding` or `zero_shot_classification`.
If `regression` or `classification`, it must match the `target_type` of the
underlying `definition.trained_model`.
If `fill_mask`, `ner`, `question_answering`, `text_classification`, or `text_embedding`; the `model_type` must be `pytorch`.+.Properties of `inference_config`[%collapsible%open]====`classification`:::(Optional, object)Classification configuration for inference.+.Properties of classification inference[%collapsible%open]=====`num_top_classes`::::(Optional, integer)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-num-top-classes]`num_top_feature_importance_values`::::(Optional, integer)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-num-top-feature-importance-values]`prediction_field_type`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-prediction-field-type]`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]`top_classes_results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-top-classes-results-field]=====`fill_mask`:::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-fill-mask]+.Properties of fill_mask inference[%collapsible%open]=====`num_top_classes`::::(Optional, integer)Number of top predicted tokens to return for replacing the mask token. 
Defaults to `0`.`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]`tokenization`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]+.Properties of tokenization[%collapsible%open]======`bert`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]+.Properties of bert[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`roberta`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]+.Properties of roberta[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`mpnet`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]+.Properties of mpnet[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]==================`ner`:::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-ner]+.Properties of ner inference[%collapsible%open]=====`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]`tokenization`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]+.Properties of tokenization[%collapsible%open]======`bert`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]+.Properties of bert[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`roberta`::::(Optional, 
object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]+.Properties of roberta[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`mpnet`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]+.Properties of mpnet[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]==================`pass_through`:::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-pass-through]+.Properties of pass_through inference[%collapsible%open]=====`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]`tokenization`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]+.Properties of tokenization[%collapsible%open]======`bert`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]+.Properties of bert[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`roberta`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]+.Properties of roberta[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`mpnet`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]+.Properties of mpnet[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]==================`question_answering`:::(Optional, 
object)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-question-answering]
+
.Properties of question_answering inference
[%collapsible%open]
=====
`max_answer_length`::::
(Optional, integer)
The maximum number of words in the answer. Defaults to `15`.

`num_top_classes`::::
(Optional, integer)
The number of top found answers to return. Defaults to `0`, meaning only the
best found answer is returned.

`question`::::
(Required, string)
The question to use when extracting an answer.

`results_field`::::
(Optional, string)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
Recommended to set `max_sequence_length` to `386` with `128` of `span` and set
`truncate` to `none`.
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`span`::::
(Optional, 
integer)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]==================`regression`:::(Optional, object)Regression configuration for inference.+.Properties of regression inference[%collapsible%open]=====`num_top_feature_importance_values`::::(Optional, integer)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-regression-num-top-feature-importance-values]`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]=====`text_classification`:::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classification]+.Properties of text_classification inference[%collapsible%open]=====`classification_labels`::::(Optional, string) An array of classification labels.`num_top_classes`::::(Optional, integer)Specifies the number of top class predictions to return. 
Defaults to all classes (-1).`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]`tokenization`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]+.Properties of tokenization[%collapsible%open]======`bert`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]+.Properties of bert[%collapsible%open]=======`span`::::(Optional, integer)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`roberta`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]+.Properties of roberta[%collapsible%open]=======`span`::::(Optional, integer)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`mpnet`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]+.Properties of mpnet[%collapsible%open]=======`span`::::(Optional, integer)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]==================`text_embedding`:::(Object, optional)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding]+.Properties of text_embedding inference[%collapsible%open]=====`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]`tokenization`::::(Optional, 
object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]+.Properties of tokenization[%collapsible%open]======`bert`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]+.Properties of bert[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`roberta`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]+.Properties of roberta[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`mpnet`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]+.Properties of mpnet[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]==================`zero_shot_classification`:::(Object, optional)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification]+.Properties of zero_shot_classification inference[%collapsible%open]=====`labels`::::(Optional, array)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification-labels]`multi_label`::::(Optional, boolean)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification-multi-label]`results_field`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]`tokenization`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]+.Properties of tokenization[%collapsible%open]======`bert`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]+.Properties of bert[%collapsible%open]=======`truncate`::::(Optional, 
string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`roberta`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]+.Properties of roberta[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]=======`mpnet`::::(Optional, object)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]+.Properties of mpnet[%collapsible%open]=======`truncate`::::(Optional, string)include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]======================//End of inference_config////[[infer-trained-model-results]]== {api-response-body-title}////////[[ml-get-trained-models-response-codes]]== {api-response-codes-title}////[[infer-trained-model-example]]== {api-examples-title}The response depends on the kind of model.For example, for language identification the response is the predicted language and the score:[source,console]--------------------------------------------------POST _ml/trained_models/lang_ident_model_1/_infer{  "docs":[{"text": "The fool doth think he is wise, but the wise man knows himself to be a fool."}]}--------------------------------------------------// TEST[skip:TBD]Here are the results predicting english with a high probability.[source,console-result]----{  "inference_results": [    {      "predicted_value": "en",      "prediction_probability": 0.9999658805366392,      "prediction_score": 0.9999658805366392    }  ]}----// NOTCONSOLEWhen it is a text classification model, the response is the score and predicted classification.For example:[source,console]--------------------------------------------------POST _ml/trained_models/model2/_infer{	"docs": [{"text_field": "The movie was awesome!!"}]}--------------------------------------------------// TEST[skip:TBD]The API returns the predicted label 
and the confidence.[source,console-result]----{  "inference_results": [{    "predicted_value" : "POSITIVE",    "prediction_probability" : 0.9998667964092964  }]}----// NOTCONSOLEFor named entity recognition (NER) models, the response contains the annotatedtext output and the recognized entities.[source,console]--------------------------------------------------POST _ml/trained_models/model2/_infer{	"docs": [{"text_field": "Hi my name is Josh and I live in Berlin"}]}--------------------------------------------------// TEST[skip:TBD]The API returns in this case:[source,console-result]----{  "inference_results": [{    "predicted_value" : "Hi my name is [Josh](PER&Josh) and I live in [Berlin](LOC&Berlin)",    "entities" : [      {        "entity" : "Josh",        "class_name" : "PER",        "class_probability" : 0.9977303419824,        "start_pos" : 14,        "end_pos" : 18      },      {        "entity" : "Berlin",        "class_name" : "LOC",        "class_probability" : 0.9992474323902818,        "start_pos" : 33,        "end_pos" : 39      }    ]  }]}----// NOTCONSOLEZero-shot classification models require extra configuration defining the class labels. 
These labels are passed in the zero-shot inference config.

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [
    {
      "text_field": "This is a very happy person"
    }
  ],
  "inference_config": {
    "zero_shot_classification": {
      "labels": [
        "glad",
        "sad",
        "bad",
        "rad"
      ],
      "multi_label": false
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]

The API returns the predicted label and the confidence, as well as the top
classes:

[source,console-result]
----
{
  "inference_results": [{
    "predicted_value" : "glad",
    "top_classes" : [
      {
        "class_name" : "glad",
        "class_probability" : 0.8061155063386439,
        "class_score" : 0.8061155063386439
      },
      {
        "class_name" : "rad",
        "class_probability" : 0.18218006158387956,
        "class_score" : 0.18218006158387956
      },
      {
        "class_name" : "bad",
        "class_probability" : 0.006325615787634201,
        "class_score" : 0.006325615787634201
      },
      {
        "class_name" : "sad",
        "class_probability" : 0.0053788162898424545,
        "class_score" : 0.0053788162898424545
      }
    ],
    "prediction_probability" : 0.8061155063386439
  }]
}
----
// NOTCONSOLE

Question answering models require extra configuration defining the question to
answer.

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [
    {
      "text_field": "<long text to extract answer>"
    }
  ],
  "inference_config": {
    "question_answering": {
      "question": "<question to be answered>"
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]

The API returns a response similar to the following:

[source,console-result]
----
{
    "predicted_value": <string subsection of the text that is the answer>,
    "start_offset": <character offset in document to start>,
    "end_offset": <character offset end of the answer>,
 "prediction_probability": <prediction score>}----// NOTCONSOLEThe tokenization truncate option can be overridden when calling the API:[source,console]--------------------------------------------------POST _ml/trained_models/model2/_infer{  "docs": [{"text_field": "The Amazon rainforest covers most of the Amazon basin in South America"}],  "inference_config": {    "ner": {      "tokenization": {        "bert": {          "truncate": "first"        }      }    }  }}--------------------------------------------------// TEST[skip:TBD]When the input has been truncated due to the limit imposed by the model's `max_sequence_length`the `is_truncated` field appears in the response.[source,console-result]----{  "inference_results": [{    "predicted_value" : "The [Amazon](LOC&Amazon) rainforest covers most of the [Amazon](LOC&Amazon) basin in [South America](LOC&South+America)",    "entities" : [      {        "entity" : "Amazon",        "class_name" : "LOC",        "class_probability" : 0.9505460915724254,        "start_pos" : 4,        "end_pos" : 10      },      {        "entity" : "Amazon",        "class_name" : "LOC",        "class_probability" : 0.9969992804311777,        "start_pos" : 41,        "end_pos" : 47      }    ],    "is_truncated" : true  }]}----// NOTCONSOLE