main.py

import base64
import inspect
import json
import logging
import mimetypes
import os
import shutil
import sys
import time
import uuid
import asyncio

from contextlib import asynccontextmanager
from typing import Optional

import aiohttp
import requests

from open_webui.apps.audio.main import app as audio_app
from open_webui.apps.images.main import app as images_app
from open_webui.apps.ollama.main import app as ollama_app
from open_webui.apps.ollama.main import (
    GenerateChatCompletionForm,
    generate_chat_completion as generate_ollama_chat_completion,
    generate_openai_chat_completion as generate_ollama_openai_chat_completion,
)
from open_webui.apps.ollama.main import get_all_models as get_ollama_models
from open_webui.apps.openai.main import app as openai_app
from open_webui.apps.openai.main import (
    generate_chat_completion as generate_openai_chat_completion,
)
from open_webui.apps.openai.main import get_all_models as get_openai_models
from open_webui.apps.rag.main import app as rag_app
from open_webui.apps.rag.utils import get_rag_context, rag_template
from open_webui.apps.socket.main import app as socket_app, periodic_usage_pool_cleanup
from open_webui.apps.socket.main import get_event_call, get_event_emitter
from open_webui.apps.webui.internal.db import Session
from open_webui.apps.webui.main import app as webui_app
from open_webui.apps.webui.main import (
    generate_function_chat_completion,
    get_pipe_models,
)
from open_webui.apps.webui.models.auths import Auths
from open_webui.apps.webui.models.functions import Functions
from open_webui.apps.webui.models.models import Models
from open_webui.apps.webui.models.users import UserModel, Users
from open_webui.apps.webui.utils import load_function_module_by_id
from open_webui.config import (
    CACHE_DIR,
    CORS_ALLOW_ORIGIN,
    DEFAULT_LOCALE,
    ENABLE_ADMIN_CHAT_ACCESS,
    ENABLE_ADMIN_EXPORT,
    ENABLE_MODEL_FILTER,
    ENABLE_OAUTH_SIGNUP,
    ENABLE_OLLAMA_API,
    ENABLE_OPENAI_API,
    ENV,
    FRONTEND_BUILD_DIR,
    MODEL_FILTER_LIST,
    OAUTH_MERGE_ACCOUNTS_BY_EMAIL,
    OAUTH_PROVIDERS,
    ENABLE_SEARCH_QUERY,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    STATIC_DIR,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    WEBHOOK_URL,
    WEBUI_AUTH,
    WEBUI_NAME,
    AppConfig,
    run_migrations,
    reset_config,
)
from open_webui.constants import ERROR_MESSAGES, TASKS, WEBHOOK_MESSAGES
from open_webui.env import (
    CHANGELOG,
    GLOBAL_LOG_LEVEL,
    SAFE_MODE,
    SRC_LOG_LEVELS,
    VERSION,
    WEBUI_BUILD_HASH,
    WEBUI_SECRET_KEY,
    WEBUI_SESSION_COOKIE_SAME_SITE,
    WEBUI_SESSION_COOKIE_SECURE,
    WEBUI_URL,
    RESET_CONFIG_ON_START,
)
from fastapi import (
    Depends,
    FastAPI,
    File,
    Form,
    HTTPException,
    Request,
    UploadFile,
    status,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from sqlalchemy import text
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import RedirectResponse, Response, StreamingResponse

from open_webui.utils.security_headers import SecurityHeadersMiddleware
from open_webui.utils.misc import (
    add_or_update_system_message,
    get_last_user_message,
    parse_duration,
    prepend_to_first_user_message_content,
)
from open_webui.utils.task import (
    moa_response_generation_template,
    search_query_generation_template,
    title_generation_template,
    tools_function_calling_generation_template,
)
from open_webui.utils.tools import get_tools
from open_webui.utils.utils import (
    create_token,
    decode_token,
    get_admin_user,
    get_current_user,
    get_http_authorization_cred,
    get_password_hash,
    get_verified_user,
)
from open_webui.utils.webhook import post_webhook
from open_webui.utils.payload import convert_payload_openai_to_ollama
from open_webui.utils.response import (
    convert_response_ollama_to_openai,
    convert_streaming_response_ollama_to_openai,
)
from open_webui.utils.oauth import oauth_manager

if SAFE_MODE:
    print("SAFE MODE ENABLED")
    Functions.deactivate_all_functions()

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])

class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex

print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)

@asynccontextmanager
async def lifespan(app: FastAPI):
    run_migrations()
    if RESET_CONFIG_ON_START:
        reset_config()

    asyncio.create_task(periodic_usage_pool_cleanup())
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.ENABLE_SEARCH_QUERY = ENABLE_SEARCH_QUERY
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

##################################
#
# ChatCompletion Middleware
#
##################################


def get_task_model_id(default_model_id):
    # Set the task model
    task_model_id = default_model_id
    # Check if the user has a custom task model and use that model
    if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
        if (
            app.state.config.TASK_MODEL
            and app.state.config.TASK_MODEL in app.state.MODELS
        ):
            task_model_id = app.state.config.TASK_MODEL
    else:
        if (
            app.state.config.TASK_MODEL_EXTERNAL
            and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
        ):
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL

    return task_model_id

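# Illustrative example (hypothetical model IDs): if the default model is owned by
# Ollama and TASK_MODEL is set to "llama3:latest" (and registered in
# app.state.MODELS), get_task_model_id("mistral:7b") returns "llama3:latest";
# for non-Ollama models, TASK_MODEL_EXTERNAL is consulted instead.
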
def get_filter_function_ids(model):
    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
    filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]

    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    filter_ids.sort(key=get_priority)
    return filter_ids

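# Note: filter IDs are sorted by their "priority" valve in ascending order, so a
# filter whose valves set priority=0 runs before one with priority=10; filters
# without a priority valve default to 0.
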
async def chat_completion_filter_functions_handler(body, model, extra_params):
    skip_files = None

    filter_ids = get_filter_function_ids(model)
    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        # Check if the function has a file_handler variable
        if hasattr(function_module, "file_handler"):
            skip_files = function_module.file_handler

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "inlet"):
            continue

        try:
            inlet = function_module.inlet

            # Get the signature of the function
            sig = inspect.signature(inlet)
            params = {"body": body} | {
                k: v
                for k, v in {
                    **extra_params,
                    "__model__": model,
                    "__id__": filter_id,
                }.items()
                if k in sig.parameters
            }

            if "__user__" in params and hasattr(function_module, "UserValves"):
                try:
                    params["__user__"]["valves"] = function_module.UserValves(
                        **Functions.get_user_valves_by_id_and_user_id(
                            filter_id, params["__user__"]["id"]
                        )
                    )
                except Exception as e:
                    print(e)

            if inspect.iscoroutinefunction(inlet):
                body = await inlet(**params)
            else:
                body = inlet(**params)

        except Exception as e:
            print(f"Error: {e}")
            raise e

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {}

def get_tools_function_calling_payload(messages, task_model_id, content):
    user_message = get_last_user_message(messages)
    history = "\n".join(
        f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
        for message in messages[::-1][:4]
    )

    prompt = f"History:\n{history}\nQuery: {user_message}"

    return {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
        "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
    }

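# Illustrative payload shape (hypothetical values): the task model receives the tool
# specs in the system prompt and the recent history in the user prompt, e.g.
# {
#     "model": "gpt-4o",
#     "messages": [
#         {"role": "system", "content": "Available Tools: [...]"},
#         {"role": "user", "content": 'Query: History:\nUSER: """..."""\nQuery: ...'},
#     ],
#     "stream": False,
#     "metadata": {"task": "..."},  # str(TASKS.FUNCTION_CALLING)
# }
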
async def get_content_from_response(response) -> Optional[str]:
    content = None
    if hasattr(response, "body_iterator"):
        async for chunk in response.body_iterator:
            data = json.loads(chunk.decode("utf-8"))
            content = data["choices"][0]["message"]["content"]

        # Cleanup any remaining background tasks if necessary
        if response.background is not None:
            await response.background()
    else:
        content = response["choices"][0]["message"]["content"]
    return content

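# Note: this helper handles both shapes returned by generate_chat_completions — a
# StreamingResponse (drained via body_iterator) and a plain OpenAI-style dict — and
# in both cases extracts choices[0].message.content.
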
async def chat_completion_tools_handler(
    body: dict, user: UserModel, extra_params: dict
) -> tuple[dict, dict]:
    # If tool_ids field is present, call the functions
    metadata = body.get("metadata", {})

    tool_ids = metadata.get("tool_ids", None)
    log.debug(f"{tool_ids=}")
    if not tool_ids:
        return body, {}

    skip_files = False
    contexts = []
    citations = []

    task_model_id = get_task_model_id(body["model"])
    tools = get_tools(
        webui_app,
        tool_ids,
        user,
        {
            **extra_params,
            "__model__": app.state.MODELS[task_model_id],
            "__messages__": body["messages"],
            "__files__": metadata.get("files", []),
        },
    )
    log.info(f"{tools=}")

    specs = [tool["spec"] for tool in tools.values()]
    tools_specs = json.dumps(specs)

    if app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
        template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    else:
        template = """Available Tools: {{TOOLS}}\nReturn an empty string if no tools match the query. If a function tool matches, construct and return a JSON object in the format {\"name\": \"functionName\", \"parameters\": {\"requiredFunctionParamKey\": \"requiredFunctionParamValue\"}} using the appropriate tool and its parameters. Only return the object and limit the response to the JSON object without additional text."""

    tools_function_calling_prompt = tools_function_calling_generation_template(
        template, tools_specs
    )
    log.info(f"{tools_function_calling_prompt=}")
    payload = get_tools_function_calling_payload(
        body["messages"], task_model_id, tools_function_calling_prompt
    )

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        raise e

    try:
        response = await generate_chat_completions(form_data=payload, user=user)
        log.debug(f"{response=}")
        content = await get_content_from_response(response)
        log.debug(f"{content=}")

        if not content:
            return body, {}

        result = json.loads(content)

        tool_function_name = result.get("name", None)
        if tool_function_name not in tools:
            return body, {}

        tool_function_params = result.get("parameters", {})

        try:
            tool_output = await tools[tool_function_name]["callable"](
                **tool_function_params
            )
        except Exception as e:
            tool_output = str(e)

        if tools[tool_function_name]["citation"]:
            citations.append(
                {
                    "source": {
                        "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                    },
                    "document": [tool_output],
                    "metadata": [{"source": tool_function_name}],
                }
            )
        if tools[tool_function_name]["file_handler"]:
            skip_files = True

        if isinstance(tool_output, str):
            contexts.append(tool_output)
    except Exception as e:
        log.exception(f"Error: {e}")
        content = None

    log.debug(f"tool_contexts: {contexts}")

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {"contexts": contexts, "citations": citations}

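# Illustrative model output this handler expects (hypothetical tool name/params):
# the task model should reply with a bare JSON object such as
#     {"name": "get_current_weather", "parameters": {"city": "Berlin"}}
# which is parsed with json.loads and dispatched to the matching entry in `tools`;
# unparseable output or an unknown tool name leaves the body unchanged.
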
async def chat_completion_files_handler(body) -> tuple[dict, dict[str, list]]:
    contexts = []
    citations = []

    if files := body.get("metadata", {}).get("files", None):
        contexts, citations = get_rag_context(
            files=files,
            messages=body["messages"],
            embedding_function=rag_app.state.EMBEDDING_FUNCTION,
            k=rag_app.state.config.TOP_K,
            reranking_function=rag_app.state.sentence_transformer_rf,
            r=rag_app.state.config.RELEVANCE_THRESHOLD,
            hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
        )

    log.debug(f"rag_contexts: {contexts}, citations: {citations}")

    return body, {"contexts": contexts, "citations": citations}

def is_chat_completion_request(request):
    return request.method == "POST" and any(
        endpoint in request.url.path
        for endpoint in ["/ollama/api/chat", "/chat/completions"]
    )


async def get_body_and_model_and_user(request):
    # Read the original request body
    body = await request.body()
    body_str = body.decode("utf-8")
    body = json.loads(body_str) if body_str else {}

    model_id = body["model"]
    if model_id not in app.state.MODELS:
        raise Exception("Model not found")
    model = app.state.MODELS[model_id]

    user = get_current_user(
        request,
        get_http_authorization_cred(request.headers.get("Authorization")),
    )

    return body, model, user

class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)
        log.debug(f"request.url.path: {request.url.path}")

        try:
            body, model, user = await get_body_and_model_and_user(request)
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        metadata = {
            "chat_id": body.pop("chat_id", None),
            "message_id": body.pop("id", None),
            "session_id": body.pop("session_id", None),
            "tool_ids": body.get("tool_ids", None),
            "files": body.get("files", None),
        }
        body["metadata"] = metadata

        extra_params = {
            "__event_emitter__": get_event_emitter(metadata),
            "__event_call__": get_event_call(metadata),
            "__user__": {
                "id": user.id,
                "email": user.email,
                "name": user.name,
                "role": user.role,
            },
        }

        # Initialize data_items to store additional data to be sent to the client
        # Initialize contexts and citations
        data_items = []
        contexts = []
        citations = []

        try:
            body, flags = await chat_completion_filter_functions_handler(
                body, model, extra_params
            )
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        metadata = {
            **metadata,
            "tool_ids": body.pop("tool_ids", None),
            "files": body.pop("files", None),
        }
        body["metadata"] = metadata

        try:
            body, flags = await chat_completion_tools_handler(body, user, extra_params)
            contexts.extend(flags.get("contexts", []))
            citations.extend(flags.get("citations", []))
        except Exception as e:
            log.exception(e)

        try:
            body, flags = await chat_completion_files_handler(body)
            contexts.extend(flags.get("contexts", []))
            citations.extend(flags.get("citations", []))
        except Exception as e:
            log.exception(e)

        # If context is not empty, insert it into the messages
        if len(contexts) > 0:
            context_string = "\n".join(contexts).strip()
            prompt = get_last_user_message(body["messages"])

            if prompt is None:
                raise Exception("No user message found")
            if (
                rag_app.state.config.RELEVANCE_THRESHOLD == 0
                and context_string.strip() == ""
            ):
                log.debug(
                    "With a 0 relevancy threshold for RAG, the context cannot be empty"
                )

            # Workaround for Ollama 2.0+ system prompt issue
            # TODO: replace with add_or_update_system_message
            if model["owned_by"] == "ollama":
                body["messages"] = prepend_to_first_user_message_content(
                    rag_template(
                        rag_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )
            else:
                body["messages"] = add_or_update_system_message(
                    rag_template(
                        rag_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )

        # If there are citations, add them to the data_items
        if len(citations) > 0:
            data_items.append({"citations": citations})

        modified_body_bytes = json.dumps(body).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        if not isinstance(response, StreamingResponse):
            return response

        content_type = response.headers["Content-Type"]
        is_openai = "text/event-stream" in content_type
        is_ollama = "application/x-ndjson" in content_type
        if not is_openai and not is_ollama:
            return response

        def wrap_item(item):
            return f"data: {item}\n\n" if is_openai else f"{item}\n"

        async def stream_wrapper(original_generator, data_items):
            for item in data_items:
                yield wrap_item(json.dumps(item))

            async for data in original_generator:
                yield data

        return StreamingResponse(
            stream_wrapper(response.body_iterator, data_items),
            headers=dict(response.headers),
        )

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

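# Note on stream framing: injected data_items are wrapped to match the downstream
# protocol — OpenAI-style SSE frames ("data: {...}\n\n") when the Content-Type is
# text/event-stream, or newline-delimited JSON ("{...}\n") for Ollama's
# application/x-ndjson — so clients can parse the prepended citations like any
# other chunk.
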
app.add_middleware(ChatCompletionMiddleware)


##################################
#
# Pipeline Middleware
#
##################################


def get_sorted_filters(model_id):
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
    return sorted_filters

def filter_pipeline(payload, user):
    user = {"id": user.id, "email": user.email, "name": user.name, "role": user.role}
    model_id = payload["model"]
    sorted_filters = get_sorted_filters(model_id)

    model = app.state.MODELS[model_id]
    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key == "":
                continue

            headers = {"Authorization": f"Bearer {key}"}
            r = requests.post(
                f"{url}/{filter['id']}/filter/inlet",
                headers=headers,
                json={
                    "user": user,
                    "body": payload,
                },
            )

            r.raise_for_status()
            payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                res = r.json()
                if "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    return payload

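# Illustrative inlet request (hypothetical pipeline URL and filter id): each matching
# filter pipeline receives
#     POST {url}/{filter_id}/filter/inlet
#     {"user": {"id": ..., "email": ..., "name": ..., "role": ...}, "body": <payload>}
# and must return the (possibly modified) payload as JSON, which becomes the input
# to the next filter in priority order.
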
class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)

        log.debug(f"request.url.path: {request.url.path}")

        # Read the original request body
        body = await request.body()
        # Decode body to string
        body_str = body.decode("utf-8")
        # Parse string to JSON
        data = json.loads(body_str) if body_str else {}

        user = get_current_user(
            request,
            get_http_authorization_cred(request.headers["Authorization"]),
        )

        try:
            data = filter_pipeline(data, user)
        except Exception as e:
            if len(e.args) > 1:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )
            else:
                return JSONResponse(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    content={"detail": str(e)},
                )

        modified_body_bytes = json.dumps(data).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

app.add_middleware(PipelineMiddleware)


app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.add_middleware(SecurityHeadersMiddleware)


@app.middleware("http")
async def commit_session_after_request(request: Request, call_next):
    response = await call_next(request)
    log.debug("Commit session after request")
    Session.commit()
    return response


@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


@app.middleware("http")
async def inspect_websocket(request: Request, call_next):
    if (
        "/ws/socket.io" in request.url.path
        and request.query_params.get("transport") == "websocket"
    ):
        upgrade = (request.headers.get("Upgrade") or "").lower()
        connection = [
            # Strip whitespace so comma-separated lists like "keep-alive, Upgrade" match
            token.strip()
            for token in (request.headers.get("Connection") or "").lower().split(",")
        ]
        # Check that the correct headers are present for an upgrade, else reject the connection
        # This is to work around this upstream issue: https://github.com/miguelgrinberg/python-engineio/issues/367
        if upgrade != "websocket" or "upgrade" not in connection:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": "Invalid WebSocket upgrade request"},
            )
    return await call_next(request)

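# Note: a compliant WebSocket upgrade request carries "Upgrade: websocket" and a
# Connection header whose token list includes "upgrade" (e.g. "keep-alive, Upgrade");
# anything else on the socket.io websocket transport path is rejected with HTTP 400.
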
app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION

async def get_all_models():
    # TODO: Optimize this function
    pipe_models = []
    openai_models = []
    ollama_models = []

    pipe_models = await get_pipe_models()

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = pipe_models + openai_models + ollama_models

    global_action_ids = [
        function.id for function in Functions.get_global_action_functions()
    ]
    enabled_action_ids = [
        function.id
        for function in Functions.get_functions_by_type("action", active_only=True)
    ]

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()

                    action_ids = []
                    if "info" in model and "meta" in model["info"]:
                        action_ids.extend(model["info"]["meta"].get("actionIds", []))

                    model["action_ids"] = action_ids
        else:
            owned_by = "openai"
            pipe = None
            action_ids = []

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    if "pipe" in model:
                        pipe = model["pipe"]
                    if "info" in model and "meta" in model["info"]:
                        action_ids.extend(model["info"]["meta"].get("actionIds", []))
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                    **({"pipe": pipe} if pipe is not None else {}),
                    "action_ids": action_ids,
                }
            )

    for model in models:
        action_ids = []
        if "action_ids" in model:
            action_ids = model["action_ids"]
            del model["action_ids"]

        action_ids = action_ids + global_action_ids
        action_ids = list(set(action_ids))
        action_ids = [
            action_id for action_id in action_ids if action_id in enabled_action_ids
        ]

        model["actions"] = []
        for action_id in action_ids:
            action = Functions.get_function_by_id(action_id)
            if action is None:
                raise Exception(f"Action not found: {action_id}")

            if action_id in webui_app.state.FUNCTIONS:
                function_module = webui_app.state.FUNCTIONS[action_id]
            else:
                function_module, _, _ = load_function_module_by_id(action_id)
                webui_app.state.FUNCTIONS[action_id] = function_module

            __webui__ = False
            if hasattr(function_module, "__webui__"):
                __webui__ = function_module.__webui__

            if hasattr(function_module, "actions"):
                actions = function_module.actions
                model["actions"].extend(
                    [
                        {
                            "id": f"{action_id}.{_action['id']}",
                            "name": _action.get(
                                "name", f"{action.name} ({_action['id']})"
                            ),
                            "description": action.meta.description,
                            "icon_url": _action.get(
                                "icon_url", action.meta.manifest.get("icon_url", None)
                            ),
                            **({"__webui__": __webui__} if __webui__ else {}),
                        }
                        for _action in actions
                    ]
                )
            else:
                model["actions"].append(
                    {
                        "id": action_id,
                        "name": action.name,
                        "description": action.meta.description,
                        "icon_url": action.meta.manifest.get("icon_url", None),
                        **({"__webui__": __webui__} if __webui__ else {}),
                    }
                )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models

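# Illustrative entry in app.state.MODELS after normalization (hypothetical values):
#     "llama3:latest": {
#         "id": "llama3:latest", "name": "Llama 3", "object": "model",
#         "created": 1720000000, "owned_by": "ollama", "ollama": {...},
#         "actions": [...],               # enabled action functions for this model
#         "info": {...}, "preset": True,  # present for custom/preset models
#     }
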
@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}

@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]

    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Model not found",
            )

    model = app.state.MODELS[model_id]

    if model.get("pipe"):
        return await generate_function_chat_completion(form_data, user=user)
    if model["owned_by"] == "ollama":
        # Using /ollama/api/chat endpoint
        form_data = convert_payload_openai_to_ollama(form_data)
        form_data = GenerateChatCompletionForm(**form_data)
        response = await generate_ollama_chat_completion(form_data=form_data, user=user)
        if form_data.stream:
            response.headers["content-type"] = "text/event-stream"
            return StreamingResponse(
                convert_streaming_response_ollama_to_openai(response),
                headers=dict(response.headers),
            )
        else:
            return convert_response_ollama_to_openai(response)
    else:
        return await generate_openai_chat_completion(form_data, user=user)

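# Routing summary: pipe (function-backed) models go to
# generate_function_chat_completion; Ollama-owned models have their OpenAI-style
# payload converted to the /ollama/api/chat schema and their responses converted
# back (streaming or not); everything else is proxied to the OpenAI-compatible
# backend unchanged.
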
@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]

    sorted_filters = get_sorted_filters(model_id)
    if "pipeline" in model:
        sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {
                            "id": user.id,
                            "name": user.name,
                            "email": user.email,
                            "role": user.role,
                        },
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel to include valves
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
    filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]
    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    # Sort filter_ids by priority, using the get_priority function
    filter_ids.sort(key=get_priority)

    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "outlet"):
            continue
        try:
            outlet = function_module.outlet

            # Get the signature of the function
            sig = inspect.signature(outlet)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": filter_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                filter_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(outlet):
                data = await outlet(**params)
            else:
                data = outlet(**params)

        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data

@app.post("/api/chat/actions/{action_id}")
async def chat_action(action_id: str, form_data: dict, user=Depends(get_verified_user)):
    if "." in action_id:
        action_id, sub_action_id = action_id.split(".")
    else:
        sub_action_id = None

    action = Functions.get_function_by_id(action_id)
    if not action:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Action not found",
        )

    data = form_data
    model_id = data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )
    model = app.state.MODELS[model_id]

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )
    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    if action_id in webui_app.state.FUNCTIONS:
        function_module = webui_app.state.FUNCTIONS[action_id]
    else:
        function_module, _, _ = load_function_module_by_id(action_id)
        webui_app.state.FUNCTIONS[action_id] = function_module

    if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
        valves = Functions.get_function_valves_by_id(action_id)
        function_module.valves = function_module.Valves(**(valves if valves else {}))

    if hasattr(function_module, "action"):
        try:
            action = function_module.action

            # Get the signature of the function
            sig = inspect.signature(action)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": sub_action_id if sub_action_id is not None else action_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                action_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(action):
                data = await action(**params)
            else:
                data = action(**params)

        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data

##################################
#
# Task Endpoints
#
##################################


# TODO: Refactor task API endpoints below into a separate file


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_SEARCH_QUERY": app.state.config.ENABLE_SEARCH_QUERY,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    ENABLE_SEARCH_QUERY: bool
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.ENABLE_SEARCH_QUERY = form_data.ENABLE_SEARCH_QUERY
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_SEARCH_QUERY": app.state.config.ENABLE_SEARCH_QUERY,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }

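# Example update request (admin only; hypothetical values):
#     POST /api/task/config/update
#     {
#         "TASK_MODEL": "llama3:latest",
#         "TASK_MODEL_EXTERNAL": "gpt-4o-mini",
#         "TITLE_GENERATION_PROMPT_TEMPLATE": "",
#         "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": "",
#         "ENABLE_SEARCH_QUERY": true,
#         "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": ""
#     }
# Empty template strings fall back to the built-in defaults used by the endpoints
# below.
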
@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
    else:
        template = """Create a concise, 3-5 word title with an emoji as a title for the prompt in the given language. Suitable Emojis for the summary can be used to enhance understanding but avoid quotation marks or special formatting. RESPOND ONLY WITH THE TITLE TEXT.

Examples of titles:
📉 Stock Market Trends
🍪 Perfect Chocolate Chip Recipe
Evolution of Music Streaming
Remote Work Productivity Tips
Artificial Intelligence in Healthcare
🎮 Video Game Development Insights

Prompt: {{prompt:middletruncate:8000}}"""

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 50}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 50,
            }
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.TITLE_GENERATION)},
    }
    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)



@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")
    if not app.state.config.ENABLE_SEARCH_QUERY:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Search query generation is disabled",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    if app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    else:
        template = """Given the user's message and interaction history, decide if a web search is necessary. You must be concise and exclusively provide a search query if one is necessary. Refrain from verbose responses or any additional commentary. Prefer suggesting a search if uncertain to provide comprehensive or updated information. If a search isn't needed at all, respond with an empty string. Default to a search query when in doubt. Today's date is {{CURRENT_DATE}}.

User Message:
{{prompt:end:4000}}

Interaction History:
{{MESSAGES:END:6}}

Search Query:"""

    content = search_query_generation_template(
        template, form_data["messages"], {"name": user.name}
    )

    print("content", content)

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 30}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 30,
            }
        ),
        "metadata": {"task": str(TASKS.QUERY_GENERATION)},
    }

    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
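
# NOTE: filter_pipeline signals HTTP-mappable failures through exception args:
# args[0] is used as the response status code and args[1] as the detail
# message; exceptions without that shape fall back to a generic 400. A
# hypothetical filter would raise in this form:
#
#     raise Exception(status.HTTP_401_UNAUTHORIZED, "Pipeline rejected the request")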


@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    print("generate_emoji")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).

Message: """{{prompt}}"""
'''
    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 4}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 4,
            }
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.EMOJI_GENERATION)},
    }

    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
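
# NOTE: This endpoint reuses title_generation_template because the emoji task
# only needs {{prompt}} (plus optional user fields) substituted into the
# template. Sketch of the effective substitution, assuming a simple
# placeholder replacement:
#
#     content = template.replace("{{prompt}}", form_data["prompt"])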


@app.post("/api/task/moa/completions")
async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)):
    print("generate_moa_response")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"

Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.

Responses from models: {{responses}}"""

    content = moa_response_generation_template(
        template,
        form_data["prompt"],
        form_data["responses"],
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": form_data.get("stream", False),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.MOA_RESPONSE_GENERATION)},
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
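
# NOTE: Unlike the other task endpoints, MoA honors the caller's "stream" flag
# and does not cap output tokens, since the synthesized answer is the
# user-facing response. A hypothetical request body:
#
#     {"model": "gpt-4o", "prompt": "...", "responses": ["answer A", "answer B"], "stream": true}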


##################################
#
# Pipelines Endpoints
#
##################################

# TODO: Refactor pipelines API endpoints below into a separate file


@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }
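
# NOTE: A pipelines-capable OpenAI-compatible backend advertises itself by
# including a "pipelines" key in its models response; only those URL indices
# are listed here. Example response shape (illustrative values):
#
#     {"data": [{"url": "http://localhost:9099", "idx": 0}]}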


@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)

    # Check if the uploaded file is a python file
    if not (file.filename and file.filename.endswith(".py")):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        status_code = status.HTTP_404_NOT_FOUND
        if r is not None:
            status_code = r.status_code
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=status_code,
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)
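
# Hypothetical client usage for the upload endpoint (multipart form with the
# target backend index and a local pipeline file; token is an admin JWT):
#
#     import requests
#     with open("my_filter.py", "rb") as f:
#         requests.post(
#             "http://localhost:8080/api/pipelines/upload",
#             headers={"Authorization": f"Bearer {token}"},
#             data={"urlIdx": 0},
#             files={"file": f},
#         )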


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
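
# Hypothetical request body for /api/pipelines/add (installs a pipeline from a
# public URL onto the backend at index 0):
#
#     {"url": "https://example.com/my_pipeline.py", "urlIdx": 0}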


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
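
# NOTE: urlIdx defaults to None above; indexing OPENAI_API_BASE_URLS with None
# raises a TypeError that the except-clause converts into the generic 404
# "Pipeline not found", so callers should always pass an explicit urlIdx.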


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
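
# NOTE: The valves endpoints above are thin proxies: Open WebUI forwards the
# request to "{url}/{pipeline_id}/valves[...]" on the selected pipelines
# backend and relays that backend's JSON body (or its error detail) to the
# caller.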


##################################
#
# Config Endpoints
#
##################################


@app.get("/api/config")
async def get_app_config(request: Request):
    user = None
    if "token" in request.cookies:
        token = request.cookies.get("token")
        data = decode_token(token)
        if data is not None and "id" in data:
            user = Users.get_user_by_id(data["id"])

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": str(DEFAULT_LOCALE),
        "oauth": {
            "providers": {
                name: config.get("name", name)
                for name, config in OAUTH_PROVIDERS.items()
            }
        },
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_login_form": webui_app.state.config.ENABLE_LOGIN_FORM,
            **(
                {
                    "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
                    "enable_image_generation": images_app.state.config.ENABLED,
                    "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
                    "enable_message_rating": webui_app.state.config.ENABLE_MESSAGE_RATING,
                    "enable_admin_export": ENABLE_ADMIN_EXPORT,
                    "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
                }
                if user is not None
                else {}
            ),
        },
        **(
            {
                "default_models": webui_app.state.config.DEFAULT_MODELS,
                "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
                "audio": {
                    "tts": {
                        "engine": audio_app.state.config.TTS_ENGINE,
                        "voice": audio_app.state.config.TTS_VOICE,
                        "split_on": audio_app.state.config.TTS_SPLIT_ON,
                    },
                    "stt": {
                        "engine": audio_app.state.config.STT_ENGINE,
                    },
                },
                "file": {
                    "max_size": rag_app.state.config.FILE_MAX_SIZE,
                    "max_count": rag_app.state.config.FILE_MAX_COUNT,
                },
                "permissions": {**webui_app.state.config.USER_PERMISSIONS},
            }
            if user is not None
            else {}
        ),
    }
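
# NOTE: /api/config is intentionally unauthenticated, but it returns a reduced
# payload for anonymous callers: the feature flags and defaults guarded by
# `if user is not None` are only included once the "token" cookie resolves to
# a valid user.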


@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: list[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


# TODO: webhook endpoint should be under config endpoints
@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
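
# NOTE: The comprehension above keeps only the first five CHANGELOG entries
# (dicts preserve insertion order); an equivalent spelling would be
# dict(list(CHANGELOG.items())[:5]).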


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )
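
# NOTE: GitHub release tags carry a "v" prefix (e.g. "v0.3.10");
# latest_version[1:] strips it so "latest" is comparable to VERSION.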


############################
# OAuth Login & Callback
############################

# SessionMiddleware is used by authlib for oauth
if len(OAUTH_PROVIDERS) > 0:
    app.add_middleware(
        SessionMiddleware,
        secret_key=WEBUI_SECRET_KEY,
        session_cookie="oui-session",
        same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
        https_only=WEBUI_SESSION_COOKIE_SECURE,
    )


@app.get("/oauth/{provider}/login")
async def oauth_login(provider: str, request: Request):
    return oauth_manager.handle_login(provider, request)


# OAuth login logic is as follows:
# 1. Attempt to find a user with a matching subject ID, tied to the provider
# 2. If OAUTH_MERGE_ACCOUNTS_BY_EMAIL is true, find a user with the email address provided via OAuth
#    - This is considered insecure in general, as OAuth providers do not always verify email addresses
# 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user
#    - Email addresses are considered unique, so registration fails if the email address is already taken
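# A condensed sketch of that lookup order (hypothetical helper names, not the
# actual OAuthManager implementation):
#
#     user = Users.get_user_by_oauth_sub(f"{provider}@{sub}")
#     if user is None and OAUTH_MERGE_ACCOUNTS_BY_EMAIL:
#         user = Users.get_user_by_email(email)
#     if user is None and ENABLE_OAUTH_SIGNUP:
#         user = create_user_from_oauth_claims(provider, sub, email)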


@app.get("/oauth/{provider}/callback")
async def oauth_callback(provider: str, request: Request, response: Response):
    return oauth_manager.handle_callback(provider, request, response)


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "description": "Open WebUI is an open, extensible, user-friendly interface for AI that adapts to your workflow.",
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "orientation": "any",
        "icons": [
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "any",
            },
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "maskable",
            },
        ],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/static/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")
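
# NOTE: The OpenSearch template attribute must contain the literal token
# {searchTerms}; inside the f-string it is produced via the expression
# {"{searchTerms}"} so it is not consumed as a format placeholder.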


@app.get("/health")
async def healthcheck():
    return {"status": True}


@app.get("/health/db")
async def healthcheck_with_db():
    Session.execute(text("SELECT 1;")).all()
    return {"status": True}
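
# NOTE: /health only confirms the app is serving requests; /health/db also
# round-trips "SELECT 1;" through the database session, so it fails when the
# database is unreachable.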


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )