middleware.py

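# Chat middleware: pre-processes chat completion payloads (filter functions,
# tool calling, web search, image generation, RAG file retrieval) and
# post-processes model responses (streaming content blocks, persistence,
# notifications).
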
import time
import logging
import sys
import os
import base64
import asyncio
from aiocache import cached
from typing import Any, Optional
import random
import json
import html
import inspect
import re

from uuid import uuid4
from concurrent.futures import ThreadPoolExecutor

from fastapi import Request
from fastapi import BackgroundTasks

from starlette.responses import Response, StreamingResponse

from open_webui.models.chats import Chats
from open_webui.models.users import Users
from open_webui.socket.main import (
    get_event_call,
    get_event_emitter,
    get_active_status_by_user_id,
)
from open_webui.routers.tasks import (
    generate_queries,
    generate_title,
    generate_image_prompt,
    generate_chat_tags,
)
from open_webui.routers.retrieval import process_web_search, SearchForm
from open_webui.routers.images import image_generations, GenerateImageForm
from open_webui.utils.webhook import post_webhook

from open_webui.models.users import UserModel
from open_webui.models.functions import Functions
from open_webui.models.models import Models

from open_webui.retrieval.utils import get_sources_from_files

from open_webui.utils.chat import generate_chat_completion
from open_webui.utils.task import (
    get_task_model_id,
    rag_template,
    tools_function_calling_generation_template,
)
from open_webui.utils.misc import (
    get_message_list,
    add_or_update_system_message,
    add_or_update_user_message,
    get_last_user_message,
    get_last_assistant_message,
    prepend_to_first_user_message_content,
)
from open_webui.utils.tools import get_tools
from open_webui.utils.plugin import load_function_module_by_id

from open_webui.tasks import create_task

from open_webui.config import (
    CACHE_DIR,
    DEFAULT_TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    DEFAULT_CODE_INTERPRETER_PROMPT,
)
from open_webui.env import (
    SRC_LOG_LEVELS,
    GLOBAL_LOG_LEVEL,
    BYPASS_MODEL_ACCESS_CONTROL,
    ENABLE_REALTIME_CHAT_SAVE,
)
from open_webui.constants import TASKS


logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
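

# Inlet filter pipeline: applies enabled "filter" functions to the request body,
# in ascending valve priority, before the payload is sent to the model.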
async def chat_completion_filter_functions_handler(request, body, model, extra_params):
    skip_files = None

    def get_filter_function_ids(model):
        def get_priority(function_id):
            function = Functions.get_function_by_id(function_id)
            if function is not None and hasattr(function, "valves"):
                # TODO: Fix FunctionModel
                return (function.valves if function.valves else {}).get("priority", 0)
            return 0

        filter_ids = [
            function.id for function in Functions.get_global_filter_functions()
        ]
        if "info" in model and "meta" in model["info"]:
            filter_ids.extend(model["info"]["meta"].get("filterIds", []))
            filter_ids = list(set(filter_ids))

        enabled_filter_ids = [
            function.id
            for function in Functions.get_functions_by_type("filter", active_only=True)
        ]

        filter_ids = [
            filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
        ]

        filter_ids.sort(key=get_priority)
        return filter_ids

    filter_ids = get_filter_function_ids(model)
    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in request.app.state.FUNCTIONS:
            function_module = request.app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            request.app.state.FUNCTIONS[filter_id] = function_module

        # Check if the function has a file_handler variable
        if hasattr(function_module, "file_handler"):
            skip_files = function_module.file_handler

        # Apply valves to the function
        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if hasattr(function_module, "inlet"):
            try:
                inlet = function_module.inlet

                # Create a dictionary of parameters to be passed to the function
                params = {"body": body} | {
                    k: v
                    for k, v in {
                        **extra_params,
                        "__model__": model,
                        "__id__": filter_id,
                    }.items()
                    if k in inspect.signature(inlet).parameters
                }

                if "__user__" in params and hasattr(function_module, "UserValves"):
                    try:
                        params["__user__"]["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                filter_id, params["__user__"]["id"]
                            )
                        )
                    except Exception as e:
                        print(e)

                if inspect.iscoroutinefunction(inlet):
                    body = await inlet(**params)
                else:
                    body = inlet(**params)
            except Exception as e:
                print(f"Error: {e}")
                raise e

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {}
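

# Tool-calling pipeline (prompt-based function calling): asks a task model to
# pick a tool via a JSON response, executes it, and collects string outputs as
# citation sources.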
async def chat_completion_tools_handler(
    request: Request, body: dict, user: UserModel, models, extra_params: dict
) -> tuple[dict, dict]:
    async def get_content_from_response(response) -> Optional[str]:
        content = None
        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Cleanup any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]
        return content

    def get_tools_function_calling_payload(messages, task_model_id, content):
        user_message = get_last_user_message(messages)
        history = "\n".join(
            f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
            for message in messages[::-1][:4]
        )

        prompt = f"History:\n{history}\nQuery: {user_message}"

        return {
            "model": task_model_id,
            "messages": [
                {"role": "system", "content": content},
                {"role": "user", "content": f"Query: {prompt}"},
            ],
            "stream": False,
            "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
        }

    # If the tool_ids field is present, call the functions
    metadata = body.get("metadata", {})

    tool_ids = metadata.get("tool_ids", None)
    log.debug(f"{tool_ids=}")
    if not tool_ids:
        return body, {}

    skip_files = False
    sources = []

    task_model_id = get_task_model_id(
        body["model"],
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )
    tools = get_tools(
        request,
        tool_ids,
        user,
        {
            **extra_params,
            "__model__": models[task_model_id],
            "__messages__": body["messages"],
            "__files__": metadata.get("files", []),
        },
    )
    log.info(f"{tools=}")

    specs = [tool["spec"] for tool in tools.values()]
    tools_specs = json.dumps(specs)

    if request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
        template = request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    else:
        template = DEFAULT_TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    tools_function_calling_prompt = tools_function_calling_generation_template(
        template, tools_specs
    )
    log.info(f"{tools_function_calling_prompt=}")
    payload = get_tools_function_calling_payload(
        body["messages"], task_model_id, tools_function_calling_prompt
    )

    try:
        response = await generate_chat_completion(request, form_data=payload, user=user)
        log.debug(f"{response=}")
        content = await get_content_from_response(response)
        log.debug(f"{content=}")

        if not content:
            return body, {}

        try:
            content = content[content.find("{") : content.rfind("}") + 1]
            if not content:
                raise Exception("No JSON object found in the response")

            result = json.loads(content)

            async def tool_call_handler(tool_call):
                nonlocal skip_files

                log.debug(f"{tool_call=}")

                tool_function_name = tool_call.get("name", None)
                if tool_function_name not in tools:
                    return body, {}

                tool_function_params = tool_call.get("parameters", {})

                try:
                    required_params = (
                        tools[tool_function_name]
                        .get("spec", {})
                        .get("parameters", {})
                        .get("required", [])
                    )
                    tool_function = tools[tool_function_name]["callable"]
                    tool_function_params = {
                        k: v
                        for k, v in tool_function_params.items()
                        if k in required_params
                    }
                    tool_output = await tool_function(**tool_function_params)
                except Exception as e:
                    tool_output = str(e)

                if isinstance(tool_output, str):
                    if tools[tool_function_name]["citation"]:
                        sources.append(
                            {
                                "source": {
                                    "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                },
                                "document": [tool_output],
                                "metadata": [
                                    {
                                        "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                    }
                                ],
                            }
                        )
                    else:
                        sources.append(
                            {
                                "source": {},
                                "document": [tool_output],
                                "metadata": [
                                    {
                                        "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                    }
                                ],
                            }
                        )

                    if tools[tool_function_name]["file_handler"]:
                        skip_files = True

            # Check if "tool_calls" is present in the result
            if result.get("tool_calls"):
                for tool_call in result.get("tool_calls"):
                    await tool_call_handler(tool_call)
            else:
                await tool_call_handler(result)
        except Exception as e:
            log.exception(f"Error: {e}")
            content = None
    except Exception as e:
        log.exception(f"Error: {e}")
        content = None

    log.debug(f"tool_contexts: {sources}")

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {"sources": sources}
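

# Web search feature: generates search queries from the conversation, runs the
# search off the event loop, and attaches the results as a retrieval file.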
async def chat_web_search_handler(
    request: Request, form_data: dict, extra_params: dict, user
):
    event_emitter = extra_params["__event_emitter__"]
    await event_emitter(
        {
            "type": "status",
            "data": {
                "action": "web_search",
                "description": "Generating search query",
                "done": False,
            },
        }
    )

    messages = form_data["messages"]
    user_message = get_last_user_message(messages)

    queries = []
    try:
        res = await generate_queries(
            request,
            {
                "model": form_data["model"],
                "messages": messages,
                "prompt": user_message,
                "type": "web_search",
            },
            user,
        )

        response = res["choices"][0]["message"]["content"]

        try:
            bracket_start = response.find("{")
            bracket_end = response.rfind("}") + 1

            if bracket_start == -1 or bracket_end == -1:
                raise Exception("No JSON object found in the response")

            response = response[bracket_start:bracket_end]
            queries = json.loads(response)
            queries = queries.get("queries", [])
        except Exception as e:
            queries = [response]
    except Exception as e:
        log.exception(e)
        queries = [user_message]

    if len(queries) == 0:
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "web_search",
                    "description": "No search query generated",
                    "done": True,
                },
            }
        )
        return form_data

    searchQuery = queries[0]

    await event_emitter(
        {
            "type": "status",
            "data": {
                "action": "web_search",
                "description": 'Searching "{{searchQuery}}"',
                "query": searchQuery,
                "done": False,
            },
        }
    )

    try:
        # Offload process_web_search to a separate thread
        loop = asyncio.get_running_loop()
        with ThreadPoolExecutor() as executor:
            results = await loop.run_in_executor(
                executor,
                lambda: process_web_search(
                    request,
                    SearchForm(
                        **{
                            "query": searchQuery,
                        }
                    ),
                    user,
                ),
            )

        if results:
            await event_emitter(
                {
                    "type": "status",
                    "data": {
                        "action": "web_search",
                        "description": "Searched {{count}} sites",
                        "query": searchQuery,
                        "urls": results["filenames"],
                        "done": True,
                    },
                }
            )

            files = form_data.get("files", [])
            files.append(
                {
                    "collection_name": results["collection_name"],
                    "name": searchQuery,
                    "type": "web_search_results",
                    "urls": results["filenames"],
                }
            )
            form_data["files"] = files
        else:
            await event_emitter(
                {
                    "type": "status",
                    "data": {
                        "action": "web_search",
                        "description": "No search results found",
                        "query": searchQuery,
                        "done": True,
                        "error": True,
                    },
                }
            )
    except Exception as e:
        log.exception(e)
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "web_search",
                    "description": 'Error searching "{{searchQuery}}"',
                    "query": searchQuery,
                    "done": True,
                    "error": True,
                },
            }
        )

    return form_data
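

# Image generation feature: optionally rewrites the user message into an image
# prompt, calls the image engine, and streams the result back as a message.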
async def chat_image_generation_handler(
    request: Request, form_data: dict, extra_params: dict, user
):
    __event_emitter__ = extra_params["__event_emitter__"]
    await __event_emitter__(
        {
            "type": "status",
            "data": {"description": "Generating an image", "done": False},
        }
    )

    messages = form_data["messages"]
    user_message = get_last_user_message(messages)

    prompt = user_message
    negative_prompt = ""

    if request.app.state.config.ENABLE_IMAGE_PROMPT_GENERATION:
        try:
            res = await generate_image_prompt(
                request,
                {
                    "model": form_data["model"],
                    "messages": messages,
                },
                user,
            )

            response = res["choices"][0]["message"]["content"]

            try:
                bracket_start = response.find("{")
                bracket_end = response.rfind("}") + 1

                if bracket_start == -1 or bracket_end == -1:
                    raise Exception("No JSON object found in the response")

                response = response[bracket_start:bracket_end]
                response = json.loads(response)
                prompt = response.get("prompt", [])
            except Exception as e:
                prompt = user_message
        except Exception as e:
            log.exception(e)
            prompt = user_message

    system_message_content = ""

    try:
        images = await image_generations(
            request=request,
            form_data=GenerateImageForm(**{"prompt": prompt}),
            user=user,
        )

        await __event_emitter__(
            {
                "type": "status",
                "data": {"description": "Generated an image", "done": True},
            }
        )

        for image in images:
            await __event_emitter__(
                {
                    "type": "message",
                    "data": {"content": f"![Generated Image]({image['url']})\n"},
                }
            )

        system_message_content = "<context>User is shown the generated image, tell the user that the image has been generated</context>"
    except Exception as e:
        log.exception(e)
        await __event_emitter__(
            {
                "type": "status",
                "data": {
                    "description": "An error occurred while generating an image",
                    "done": True,
                },
            }
        )

        system_message_content = "<context>Unable to generate an image, tell the user that an error occurred</context>"

    if system_message_content:
        form_data["messages"] = add_or_update_system_message(
            system_message_content, form_data["messages"]
        )

    return form_data
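

# RAG file retrieval: generates retrieval queries for any attached files and
# pulls matching sources via the embedding (and optional reranking) pipeline.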
async def chat_completion_files_handler(
    request: Request, body: dict, user: UserModel
) -> tuple[dict, dict[str, list]]:
    sources = []

    if files := body.get("metadata", {}).get("files", None):
        try:
            queries_response = await generate_queries(
                request,
                {
                    "model": body["model"],
                    "messages": body["messages"],
                    "type": "retrieval",
                },
                user,
            )
            queries_response = queries_response["choices"][0]["message"]["content"]

            try:
                bracket_start = queries_response.find("{")
                bracket_end = queries_response.rfind("}") + 1

                if bracket_start == -1 or bracket_end == -1:
                    raise Exception("No JSON object found in the response")

                queries_response = queries_response[bracket_start:bracket_end]
                queries_response = json.loads(queries_response)
            except Exception as e:
                queries_response = {"queries": [queries_response]}

            queries = queries_response.get("queries", [])
        except Exception as e:
            queries = []

        if len(queries) == 0:
            queries = [get_last_user_message(body["messages"])]

        try:
            # Offload get_sources_from_files to a separate thread
            loop = asyncio.get_running_loop()
            with ThreadPoolExecutor() as executor:
                sources = await loop.run_in_executor(
                    executor,
                    lambda: get_sources_from_files(
                        files=files,
                        queries=queries,
                        embedding_function=request.app.state.EMBEDDING_FUNCTION,
                        k=request.app.state.config.TOP_K,
                        reranking_function=request.app.state.rf,
                        r=request.app.state.config.RELEVANCE_THRESHOLD,
                        hybrid_search=request.app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                    ),
                )
        except Exception as e:
            log.exception(e)

        log.debug(f"rag_contexts:sources: {sources}")

    return body, {"sources": sources}
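

# Maps advanced model params onto the payload: Ollama models take an "options"
# dict, everything else gets OpenAI-style top-level keys.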
def apply_params_to_form_data(form_data, model):
    params = form_data.pop("params", {})
    if model.get("ollama"):
        form_data["options"] = params

        if "format" in params:
            form_data["format"] = params["format"]

        if "keep_alive" in params:
            form_data["keep_alive"] = params["keep_alive"]
    else:
        if "seed" in params:
            form_data["seed"] = params["seed"]

        if "stop" in params:
            form_data["stop"] = params["stop"]

        if "temperature" in params:
            form_data["temperature"] = params["temperature"]

        if "max_tokens" in params:
            form_data["max_tokens"] = params["max_tokens"]

        if "top_p" in params:
            form_data["top_p"] = params["top_p"]

        if "frequency_penalty" in params:
            form_data["frequency_penalty"] = params["frequency_penalty"]

        if "reasoning_effort" in params:
            form_data["reasoning_effort"] = params["reasoning_effort"]

    return form_data
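

# Pre-processing entry point: runs every handler above against the incoming
# payload and returns the final form_data plus any events for the client.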
async def process_chat_payload(request, form_data, metadata, user, model):
    form_data = apply_params_to_form_data(form_data, model)
    log.debug(f"form_data: {form_data}")

    event_emitter = get_event_emitter(metadata)
    event_call = get_event_call(metadata)

    extra_params = {
        "__event_emitter__": event_emitter,
        "__event_call__": event_call,
        "__user__": {
            "id": user.id,
            "email": user.email,
            "name": user.name,
            "role": user.role,
        },
        "__metadata__": metadata,
        "__request__": request,
    }

    # Initialize events to store additional events to be sent to the client
    # Initialize contexts and citations
    models = request.app.state.MODELS

    events = []
    sources = []

    user_message = get_last_user_message(form_data["messages"])
    model_knowledge = model.get("info", {}).get("meta", {}).get("knowledge", False)

    if model_knowledge:
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "knowledge_search",
                    "query": user_message,
                    "done": False,
                },
            }
        )

        knowledge_files = []
        for item in model_knowledge:
            if item.get("collection_name"):
                knowledge_files.append(
                    {
                        "id": item.get("collection_name"),
                        "name": item.get("name"),
                        "legacy": True,
                    }
                )
            elif item.get("collection_names"):
                knowledge_files.append(
                    {
                        "name": item.get("name"),
                        "type": "collection",
                        "collection_names": item.get("collection_names"),
                        "legacy": True,
                    }
                )
            else:
                knowledge_files.append(item)

        files = form_data.get("files", [])
        files.extend(knowledge_files)
        form_data["files"] = files

    variables = form_data.pop("variables", None)
    features = form_data.pop("features", None)

    if features:
        if "web_search" in features and features["web_search"]:
            form_data = await chat_web_search_handler(
                request, form_data, extra_params, user
            )

        if "image_generation" in features and features["image_generation"]:
            form_data = await chat_image_generation_handler(
                request, form_data, extra_params, user
            )

        if "code_interpreter" in features and features["code_interpreter"]:
            form_data["messages"] = add_or_update_user_message(
                DEFAULT_CODE_INTERPRETER_PROMPT, form_data["messages"]
            )

    try:
        form_data, flags = await chat_completion_filter_functions_handler(
            request, form_data, model, extra_params
        )
    except Exception as e:
        raise Exception(f"Error: {e}")

    tool_ids = form_data.pop("tool_ids", None)
    files = form_data.pop("files", None)

    # Remove duplicate files
    if files:
        files = list({json.dumps(f, sort_keys=True): f for f in files}.values())

    metadata = {
        **metadata,
        "tool_ids": tool_ids,
        "files": files,
    }
    form_data["metadata"] = metadata

    if form_data["metadata"].get("function_calling") != "native":
        # If function calling is not native, call the tools function-calling handler
        try:
            form_data, flags = await chat_completion_tools_handler(
                request, form_data, user, models, extra_params
            )
            sources.extend(flags.get("sources", []))
        except Exception as e:
            log.exception(e)

    try:
        form_data, flags = await chat_completion_files_handler(request, form_data, user)
        sources.extend(flags.get("sources", []))
    except Exception as e:
        log.exception(e)

    # If context is not empty, insert it into the messages
    if len(sources) > 0:
        context_string = ""
        for source_idx, source in enumerate(sources):
            source_id = source.get("source", {}).get("name", "")

            if "document" in source:
                for doc_idx, doc_context in enumerate(source["document"]):
                    metadata = source.get("metadata")
                    doc_source_id = None

                    if metadata:
                        doc_source_id = metadata[doc_idx].get("source", source_id)

                    if source_id:
                        context_string += f"<source><source_id>{doc_source_id if doc_source_id is not None else source_id}</source_id><source_context>{doc_context}</source_context></source>\n"
                    else:
                        # If there is no source_id, then do not include the source_id tag
                        context_string += f"<source><source_context>{doc_context}</source_context></source>\n"

        context_string = context_string.strip()
        prompt = get_last_user_message(form_data["messages"])

        if prompt is None:
            raise Exception("No user message found")
        if (
            request.app.state.config.RELEVANCE_THRESHOLD == 0
            and context_string.strip() == ""
        ):
            log.debug(
                "With a 0 relevance threshold for RAG, the context cannot be empty"
            )

        # Workaround for Ollama 2.0+ system prompt issue
        # TODO: replace with add_or_update_system_message
        if model["owned_by"] == "ollama":
            form_data["messages"] = prepend_to_first_user_message_content(
                rag_template(
                    request.app.state.config.RAG_TEMPLATE, context_string, prompt
                ),
                form_data["messages"],
            )
        else:
            form_data["messages"] = add_or_update_system_message(
                rag_template(
                    request.app.state.config.RAG_TEMPLATE, context_string, prompt
                ),
                form_data["messages"],
            )

    # If there are citations, add them to the data_items
    sources = [source for source in sources if source.get("source", {}).get("name", "")]
    if len(sources) > 0:
        events.append({"sources": sources})

    if model_knowledge:
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "knowledge_search",
                    "query": user_message,
                    "done": True,
                    "hidden": True,
                },
            }
        )

    return form_data, events
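

# Post-processing entry point: persists and emits the model response; streaming
# responses are handed off to a background task defined below.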
async def process_chat_response(
    request, response, form_data, user, events, metadata, tasks
):
    async def background_tasks_handler():
        message_map = Chats.get_messages_by_chat_id(metadata["chat_id"])
        message = message_map.get(metadata["message_id"]) if message_map else None

        if message:
            messages = get_message_list(message_map, message.get("id"))

            if tasks and messages:
                if TASKS.TITLE_GENERATION in tasks:
                    if tasks[TASKS.TITLE_GENERATION]:
                        res = await generate_title(
                            request,
                            {
                                "model": message["model"],
                                "messages": messages,
                                "chat_id": metadata["chat_id"],
                            },
                            user,
                        )

                        if res and isinstance(res, dict):
                            if len(res.get("choices", [])) == 1:
                                title_string = (
                                    res.get("choices", [])[0]
                                    .get("message", {})
                                    .get("content", message.get("content", "New Chat"))
                                )
                            else:
                                title_string = ""

                            title_string = title_string[
                                title_string.find("{") : title_string.rfind("}") + 1
                            ]

                            try:
                                title = json.loads(title_string).get(
                                    "title", "New Chat"
                                )
                            except Exception as e:
                                title = ""

                            if not title:
                                title = messages[0].get("content", "New Chat")

                            Chats.update_chat_title_by_id(metadata["chat_id"], title)

                            await event_emitter(
                                {
                                    "type": "chat:title",
                                    "data": title,
                                }
                            )
                    elif len(messages) == 2:
                        title = messages[0].get("content", "New Chat")

                        Chats.update_chat_title_by_id(metadata["chat_id"], title)

                        await event_emitter(
                            {
                                "type": "chat:title",
                                "data": title,
                            }
                        )

                if TASKS.TAGS_GENERATION in tasks and tasks[TASKS.TAGS_GENERATION]:
                    res = await generate_chat_tags(
                        request,
                        {
                            "model": message["model"],
                            "messages": messages,
                            "chat_id": metadata["chat_id"],
                        },
                        user,
                    )

                    if res and isinstance(res, dict):
                        if len(res.get("choices", [])) == 1:
                            tags_string = (
                                res.get("choices", [])[0]
                                .get("message", {})
                                .get("content", "")
                            )
                        else:
                            tags_string = ""

                        tags_string = tags_string[
                            tags_string.find("{") : tags_string.rfind("}") + 1
                        ]

                        try:
                            tags = json.loads(tags_string).get("tags", [])
                            Chats.update_chat_tags_by_id(
                                metadata["chat_id"], tags, user
                            )

                            await event_emitter(
                                {
                                    "type": "chat:tags",
                                    "data": tags,
                                }
                            )
                        except Exception as e:
                            pass

    event_emitter = None
    event_caller = None
    if (
        "session_id" in metadata
        and metadata["session_id"]
        and "chat_id" in metadata
        and metadata["chat_id"]
        and "message_id" in metadata
        and metadata["message_id"]
    ):
        event_emitter = get_event_emitter(metadata)
        event_caller = get_event_call(metadata)

    # Non-streaming response
    if not isinstance(response, StreamingResponse):
        if event_emitter:
            if "selected_model_id" in response:
                Chats.upsert_message_to_chat_by_id_and_message_id(
                    metadata["chat_id"],
                    metadata["message_id"],
                    {
                        "selectedModelId": response["selected_model_id"],
                    },
                )

            if response.get("choices", [])[0].get("message", {}).get("content"):
                content = response["choices"][0]["message"]["content"]

                if content:
                    await event_emitter(
                        {
                            "type": "chat:completion",
                            "data": response,
                        }
                    )

                    title = Chats.get_chat_title_by_id(metadata["chat_id"])
                    await event_emitter(
                        {
                            "type": "chat:completion",
                            "data": {
                                "done": True,
                                "content": content,
                                "title": title,
                            },
                        }
                    )

                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            "content": content,
                        },
                    )

                    # Send a webhook notification if the user is not active
                    if get_active_status_by_user_id(user.id) is None:
                        webhook_url = Users.get_user_webhook_url_by_id(user.id)
                        if webhook_url:
                            post_webhook(
                                webhook_url,
                                f"{title} - {request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}\n\n{content}",
                                {
                                    "action": "chat",
                                    "message": content,
                                    "title": title,
                                    "url": f"{request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}",
                                },
                            )

                    await background_tasks_handler()

            return response
        else:
            return response

    # Non-standard response
    if not any(
        content_type in response.headers["Content-Type"]
        for content_type in ["text/event-stream", "application/x-ndjson"]
    ):
        return response

    # Streaming response
    if event_emitter and event_caller:
        task_id = str(uuid4())  # Create a unique task ID.
        model_id = form_data.get("model", "")

        Chats.upsert_message_to_chat_by_id_and_message_id(
            metadata["chat_id"],
            metadata["message_id"],
            {
                "model": model_id,
            },
        )

        # Handle as a background task
        async def post_response_handler(response, events):
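            # Runs as a detached task: consumes the upstream SSE stream, tracks
            # structured content blocks, persists the message, and emits events.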
            def serialize_content_blocks(content_blocks, raw=False):
                content = ""

                for block in content_blocks:
                    if block["type"] == "text":
                        content = f"{content}{block['content'].strip()}\n"
                    elif block["type"] == "reasoning":
                        reasoning_display_content = "\n".join(
                            (f"> {line}" if not line.startswith(">") else line)
                            for line in block["content"].splitlines()
                        )

                        reasoning_duration = block.get("duration", None)

                        if reasoning_duration:
                            if raw:
                                content = f'{content}\n<{block["tag"]}>{block["content"]}</{block["tag"]}>\n'
                            else:
                                content = f'{content}\n<details type="reasoning" done="true" duration="{reasoning_duration}">\n<summary>Thought for {reasoning_duration} seconds</summary>\n{reasoning_display_content}\n</details>\n'
                        else:
                            if raw:
                                content = f'{content}\n<{block["tag"]}>{block["content"]}</{block["tag"]}>\n'
                            else:
                                content = f'{content}\n<details type="reasoning" done="false">\n<summary>Thinking…</summary>\n{reasoning_display_content}\n</details>\n'
                    elif block["type"] == "code_interpreter":
                        attributes = block.get("attributes", {})
                        output = block.get("output", None)
                        lang = attributes.get("lang", "")

                        if output:
                            output = html.escape(json.dumps(output))

                            if raw:
                                content = f'{content}\n<code_interpreter type="code" lang="{lang}">\n{block["content"]}\n</code_interpreter>\n```output\n{output}\n```\n'
                            else:
                                content = f'{content}\n<details type="code_interpreter" done="true" output="{output}">\n<summary>Analyzed</summary>\n```{lang}\n{block["content"]}\n```\n</details>\n'
                        else:
                            if raw:
                                content = f'{content}\n<code_interpreter type="code" lang="{lang}">\n{block["content"]}\n</code_interpreter>\n'
                            else:
                                content = f'{content}\n<details type="code_interpreter" done="false">\n<summary>Analyzing...</summary>\n```{lang}\n{block["content"]}\n```\n</details>\n'
                    else:
                        block_content = str(block["content"]).strip()
                        content = f"{content}{block['type']}: {block_content}\n"

                return content
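
            # Detects <tag ...>...</tag> spans (reasoning / code interpreter) in
            # the streamed text and converts them into typed content blocks.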
            def tag_content_handler(content_type, tags, content, content_blocks):
                end_flag = False

                def extract_attributes(tag_content):
                    """Extract attributes from a tag if they exist."""
                    attributes = {}
                    # Match attributes in the format: key="value" (ignores single quotes for simplicity)
                    matches = re.findall(r'(\w+)\s*=\s*"([^"]+)"', tag_content)
                    for key, value in matches:
                        attributes[key] = value
                    return attributes

                if content_blocks[-1]["type"] == "text":
                    for tag in tags:
                        # Match start tag e.g., <tag> or <tag attr="value">
                        start_tag_pattern = rf"<{tag}(.*?)>"
                        match = re.search(start_tag_pattern, content)
                        if match:
                            # Extract attributes in the tag (if present)
                            attributes = extract_attributes(match.group(1))

                            # Remove the start tag from the currently handled text block
                            content_blocks[-1]["content"] = content_blocks[-1][
                                "content"
                            ].replace(match.group(0), "")

                            if not content_blocks[-1]["content"]:
                                content_blocks.pop()

                            # Append the new block
                            content_blocks.append(
                                {
                                    "type": content_type,
                                    "tag": tag,
                                    "attributes": attributes,
                                    "content": "",
                                    "started_at": time.time(),
                                }
                            )
                            break
                elif content_blocks[-1]["type"] == content_type:
                    tag = content_blocks[-1]["tag"]
                    # Match end tag e.g., </tag>
                    end_tag_pattern = rf"</{tag}>"

                    if re.search(end_tag_pattern, content):
                        block_content = content_blocks[-1]["content"]

                        # Strip start and end tags from the content
                        start_tag_pattern = rf"<{tag}(.*?)>"
                        block_content = re.sub(
                            start_tag_pattern, "", block_content
                        ).strip()
                        block_content = re.sub(
                            end_tag_pattern, "", block_content
                        ).strip()

                        if block_content:
                            end_flag = True
                            content_blocks[-1]["content"] = block_content
                            content_blocks[-1]["ended_at"] = time.time()
                            content_blocks[-1]["duration"] = int(
                                content_blocks[-1]["ended_at"]
                                - content_blocks[-1]["started_at"]
                            )

                            # Reset the content_blocks by appending a new text block
                            content_blocks.append(
                                {
                                    "type": "text",
                                    "content": "",
                                }
                            )

                            # Clean processed content
                            content = re.sub(
                                rf"<{tag}(.*?)>(.|\n)*?</{tag}>",
                                "",
                                content,
                                flags=re.DOTALL,
                            )
                        else:
                            # Remove the block if content is empty
                            content_blocks.pop()

                return content, content_blocks, end_flag

            message = Chats.get_message_by_id_and_message_id(
                metadata["chat_id"], metadata["message_id"]
            )
            content = message.get("content", "") if message else ""
            content_blocks = [
                {
                    "type": "text",
                    "content": content,
                }
            ]

            # We might want to disable this by default
            DETECT_REASONING = True
            DETECT_CODE_INTERPRETER = metadata.get("features", {}).get(
                "code_interpreter", False
            )

            reasoning_tags = ["think", "reason", "reasoning", "thought", "Thought"]
            code_interpreter_tags = ["code_interpreter"]

            try:
                for event in events:
                    await event_emitter(
                        {
                            "type": "chat:completion",
                            "data": event,
                        }
                    )

                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            **event,
                        },
                    )
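
                # Parses "data: {...}" SSE lines, appends deltas to the current
                # content block, and re-emits serialized content to the client.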
                async def stream_body_handler(response):
                    nonlocal content
                    nonlocal content_blocks

                    async for line in response.body_iterator:
                        line = line.decode("utf-8") if isinstance(line, bytes) else line
                        data = line

                        # Skip empty lines
                        if not data.strip():
                            continue

                        # "data:" is the prefix for each event
                        if not data.startswith("data:"):
                            continue

                        # Remove the prefix
                        data = data[len("data:") :].strip()

                        try:
                            data = json.loads(data)

                            if "selected_model_id" in data:
                                model_id = data["selected_model_id"]
                                Chats.upsert_message_to_chat_by_id_and_message_id(
                                    metadata["chat_id"],
                                    metadata["message_id"],
                                    {
                                        "selectedModelId": model_id,
                                    },
                                )
                            else:
                                choices = data.get("choices", [])
                                if not choices:
                                    continue

                                value = choices[0].get("delta", {}).get("content")
                                if value:
                                    content = f"{content}{value}"
                                    content_blocks[-1]["content"] = (
                                        content_blocks[-1]["content"] + value
                                    )

                                    if DETECT_REASONING:
                                        content, content_blocks, _ = tag_content_handler(
                                            "reasoning",
                                            reasoning_tags,
                                            content,
                                            content_blocks,
                                        )

                                    if DETECT_CODE_INTERPRETER:
                                        content, content_blocks, end = tag_content_handler(
                                            "code_interpreter",
                                            code_interpreter_tags,
                                            content,
                                            content_blocks,
                                        )

                                        if end:
                                            break

                                    if ENABLE_REALTIME_CHAT_SAVE:
                                        # Save message in the database
                                        Chats.upsert_message_to_chat_by_id_and_message_id(
                                            metadata["chat_id"],
                                            metadata["message_id"],
                                            {
                                                "content": serialize_content_blocks(
                                                    content_blocks
                                                ),
                                            },
                                        )
                                    else:
                                        data = {
                                            "content": serialize_content_blocks(
                                                content_blocks
                                            ),
                                        }

                            await event_emitter(
                                {
                                    "type": "chat:completion",
                                    "data": data,
                                }
                            )
                        except Exception as e:
                            done = "data: [DONE]" in line

                            if done:
                                pass
                            else:
                                log.debug(f"Error: {e}")
                                continue

                    # Clean up the last text block
                    if content_blocks[-1]["type"] == "text":
                        content_blocks[-1]["content"] = content_blocks[-1][
                            "content"
                        ].strip()

                        if not content_blocks[-1]["content"]:
                            content_blocks.pop()

                    await event_emitter(
                        {
                            "type": "chat:completion",
                            "data": {
                                "content": serialize_content_blocks(content_blocks),
                            },
                        }
                    )

                    if response.background:
                        await response.background()

                await stream_body_handler(response)
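
                # Execute detected code blocks (up to MAX_RETRIES rounds) and feed
                # the output back to the model for a follow-up completion.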
                if DETECT_CODE_INTERPRETER:
                    MAX_RETRIES = 5
                    retries = 0

                    while (
                        content_blocks[-1]["type"] == "code_interpreter"
                        and retries < MAX_RETRIES
                    ):
                        retries += 1
                        log.debug(f"Attempt count: {retries}")

                        output = ""
                        try:
                            if content_blocks[-1]["attributes"].get("type") == "code":
                                output = await event_caller(
                                    {
                                        "type": "execute:python",
                                        "data": {
                                            "id": str(uuid4()),
                                            "code": content_blocks[-1]["content"],
                                        },
                                    }
                                )

                                if isinstance(output, dict):
                                    stdout = output.get("stdout", "")

                                    if stdout:
                                        stdoutLines = stdout.split("\n")
                                        for idx, line in enumerate(stdoutLines):
                                            if "data:image/png;base64" in line:
                                                id = str(uuid4())

                                                # ensure the path exists
                                                os.makedirs(
                                                    os.path.join(CACHE_DIR, "images"),
                                                    exist_ok=True,
                                                )

                                                image_path = os.path.join(
                                                    CACHE_DIR,
                                                    f"images/{id}.png",
                                                )

                                                with open(image_path, "wb") as f:
                                                    f.write(
                                                        base64.b64decode(
                                                            line.split(",")[1]
                                                        )
                                                    )

                                                stdoutLines[idx] = (
                                                    f"![Output Image {idx}](/cache/images/{id}.png)"
                                                )

                                        output["stdout"] = "\n".join(stdoutLines)
                        except Exception as e:
                            output = str(e)

                        content_blocks[-1]["output"] = output

                        content_blocks.append(
                            {
                                "type": "text",
                                "content": "",
                            }
                        )

                        await event_emitter(
                            {
                                "type": "chat:completion",
                                "data": {
                                    "content": serialize_content_blocks(content_blocks),
                                },
                            }
                        )

                        try:
                            res = await generate_chat_completion(
                                request,
                                {
                                    "model": model_id,
                                    "stream": True,
                                    "messages": [
                                        *form_data["messages"],
                                        {
                                            "role": "assistant",
                                            "content": serialize_content_blocks(
                                                content_blocks, raw=True
                                            ),
                                        },
                                    ],
                                },
                                user,
                            )

                            if isinstance(res, StreamingResponse):
                                await stream_body_handler(res)
                            else:
                                break
                        except Exception as e:
                            log.debug(e)
                            break

                title = Chats.get_chat_title_by_id(metadata["chat_id"])
                data = {
                    "done": True,
                    "content": serialize_content_blocks(content_blocks),
                    "title": title,
                }

                if not ENABLE_REALTIME_CHAT_SAVE:
                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            "content": serialize_content_blocks(content_blocks),
                        },
                    )

                # Send a webhook notification if the user is not active
                if get_active_status_by_user_id(user.id) is None:
                    webhook_url = Users.get_user_webhook_url_by_id(user.id)
                    if webhook_url:
                        post_webhook(
                            webhook_url,
                            f"{title} - {request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}\n\n{content}",
                            {
                                "action": "chat",
                                "message": content,
                                "title": title,
                                "url": f"{request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}",
                            },
                        )

                await event_emitter(
                    {
                        "type": "chat:completion",
                        "data": data,
                    }
                )

                await background_tasks_handler()
            except asyncio.CancelledError:
                print("Task was cancelled!")
                await event_emitter({"type": "task-cancelled"})

                if not ENABLE_REALTIME_CHAT_SAVE:
                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            "content": serialize_content_blocks(content_blocks),
                        },
                    )

            if response.background is not None:
                await response.background()

        # background_tasks.add_task(post_response_handler, response, events)
        task_id, _ = create_task(post_response_handler(response, events))
        return {"status": True, "task_id": task_id}

    else:
        # Fallback to the original response
        async def stream_wrapper(original_generator, events):
            def wrap_item(item):
                return f"data: {item}\n\n"

            for event in events:
                yield wrap_item(json.dumps(event))

            async for data in original_generator:
                yield data

        return StreamingResponse(
            stream_wrapper(response.body_iterator, events),
            headers=dict(response.headers),
            background=response.background,
        )