fix: run litellm as subprocess

Timothy J. Baek 1 year ago
parent commit 5e458d490a
2 changed files with 58 additions and 20 deletions
  1. backend/apps/litellm/main.py (+57 -14)
  2. backend/main.py (+1 -6)

backend/apps/litellm/main.py (+57 -14)

@@ -1,8 +1,8 @@
-import logging
-
-from litellm.proxy.proxy_server import ProxyConfig, initialize
-from litellm.proxy.proxy_server import app
+from fastapi import FastAPI, Depends
+from fastapi.routing import APIRoute
+from fastapi.middleware.cors import CORSMiddleware
 
+import logging
 from fastapi import FastAPI, Request, Depends, status, Response
 from fastapi.responses import JSONResponse
 
@@ -23,24 +23,39 @@ from config import (
 )
 
 
-proxy_config = ProxyConfig()
+import asyncio
+import subprocess
 
 
-async def config():
-    router, model_list, general_settings = await proxy_config.load_config(
-        router=None, config_file_path="./data/litellm/config.yaml"
-    )
+app = FastAPI()
 
-    await initialize(config="./data/litellm/config.yaml", telemetry=False)
+origins = ["*"]
 
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
 
-async def startup():
-    await config()
+
+async def run_background_process(command):
+    process = await asyncio.create_subprocess_exec(
+        *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
+    )
+    return process
+
+
+async def start_litellm_background():
+    # Command to run in the background
+    command = "litellm --config ./data/litellm/config.yaml"
+    await run_background_process(command)
 
 
 @app.on_event("startup")
-async def on_startup():
-    await startup()
+async def startup_event():
+    asyncio.create_task(start_litellm_background())
 
 
 app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
@@ -63,6 +78,11 @@ async def auth_middleware(request: Request, call_next):
     return response
 
 
+@app.get("/")
+async def get_status():
+    return {"status": True}
+
+
 class ModifyModelsResponseMiddleware(BaseHTTPMiddleware):
     async def dispatch(
         self, request: Request, call_next: RequestResponseEndpoint
@@ -98,3 +118,26 @@ class ModifyModelsResponseMiddleware(BaseHTTPMiddleware):
 
 
 app.add_middleware(ModifyModelsResponseMiddleware)
+
+
+# from litellm.proxy.proxy_server import ProxyConfig, initialize
+# from litellm.proxy.proxy_server import app
+
+# proxy_config = ProxyConfig()
+
+
+# async def config():
+#     router, model_list, general_settings = await proxy_config.load_config(
+#         router=None, config_file_path="./data/litellm/config.yaml"
+#     )
+
+#     await initialize(config="./data/litellm/config.yaml", telemetry=False)
+
+
+# async def startup():
+#     await config()
+
+
+# @app.on_event("startup")
+# async def on_startup():
+#     await startup()
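
The heart of the change: instead of importing litellm's proxy server in-process via `ProxyConfig`/`initialize` (kept above only as a commented-out block), the `litellm` CLI is spawned as a child process from the FastAPI startup hook. A minimal standalone sketch of that pattern, assuming the `litellm` command is on PATH and the config path from the diff exists; the pipe-draining coroutine is an illustrative addition, since the committed code pipes stdout/stderr but never reads them, which can stall the child once a pipe buffer fills:

    import asyncio


    async def run_background_process(command: str) -> asyncio.subprocess.Process:
        # Spawn the command as a child process; stdout/stderr are piped,
        # as in the diff above.
        return await asyncio.create_subprocess_exec(
            *command.split(),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )


    async def drain(stream: asyncio.StreamReader, name: str) -> None:
        # Keep reading a pipe so the child never blocks on a full buffer.
        # (Illustrative addition, not part of the commit.)
        while line := await stream.readline():
            print(f"[litellm {name}] {line.decode().rstrip()}")


    async def main() -> None:
        process = await run_background_process(
            "litellm --config ./data/litellm/config.yaml"
        )
        await asyncio.gather(
            drain(process.stdout, "stdout"),
            drain(process.stderr, "stderr"),
        )


    asyncio.run(main())

In the app itself the launch is wrapped in `asyncio.create_task(...)` inside the startup handler, so FastAPI finishes starting up without waiting on the child process.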

backend/main.py (+1 -6)

@@ -20,7 +20,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
 from apps.ollama.main import app as ollama_app
 from apps.openai.main import app as openai_app
 
-from apps.litellm.main import app as litellm_app, startup as litellm_app_startup
+from apps.litellm.main import app as litellm_app
 from apps.audio.main import app as audio_app
 from apps.images.main import app as images_app
 from apps.rag.main import app as rag_app
@@ -168,11 +168,6 @@ async def check_url(request: Request, call_next):
     return response
 
 
-@app.on_event("startup")
-async def on_startup():
-    await litellm_app_startup()
-
-
 app.mount("/api/v1", webui_app)
 app.mount("/litellm/api", litellm_app)