
Merge pull request #907 from open-webui/dev

0.1.103
Timothy Jaeryang Baek 1 year ago
commit 02fb517bbe

+ 21 - 0
CHANGELOG.md

@@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [0.1.103] - 2024-02-25
+
+### Added
+
+- **🔗 Built-in LiteLLM Proxy**: Now includes LiteLLM proxy within Open WebUI for enhanced functionality.
+
+  - Easily integrate existing LiteLLM configurations using `-v /path/to/config.yaml:/app/backend/data/litellm/config.yaml` flag.
+  - When running Open WebUI in a Docker container, replace `localhost` with `host.docker.internal` for connections to services on the host.
+
+- **🖼️ Image Generation Enhancements**: Introducing Advanced Settings with Image Preview Feature.
+  - Customize image generation by setting the number of steps; defaults to A1111 value.
+
+### Fixed
+
+- Resolved issue with RAG scan halting document loading upon encountering unsupported MIME types or exceptions (Issue #866).
+
+### Changed
+
+- Ollama is no longer required to run Open WebUI.
+- Access our comprehensive documentation at [Open WebUI Documentation](https://docs.openwebui.com/).
+
 ## [0.1.102] - 2024-02-22

 ### Added
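
For context, the `-v` flag mentioned in the changelog above maps an existing LiteLLM config into the path the backend now reads at startup. A full command might look like this sketch (host path and port mapping are illustrative, not part of this commit):

```bash
docker run -d -p 3000:8080 \
  -v /path/to/config.yaml:/app/backend/data/litellm/config.yaml \
  -v open-webui:/app/backend/data \
  --name open-webui ghcr.io/open-webui/open-webui:main
```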

+ 12 - 2
README.md

@@ -103,14 +103,24 @@ Don't forget to explore our sibling project, [Open WebUI Community](https://open

 - After installation, you can access Open WebUI at [http://localhost:3000](http://localhost:3000). Enjoy! 😄

-#### Troubleshooting
+#### Open WebUI: Server Connection Error

-Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).
+If you're experiencing connection issues, it's often because the WebUI docker container cannot reach the Ollama server at 127.0.0.1:11434 (host.docker.internal:11434) from inside the container. Use the `--network=host` flag in your docker command to resolve this. Note that the port changes from 3000 to 8080, so the link becomes `http://localhost:8080`.
+
+**Example Docker Command**:
+
+```bash
+docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_API_BASE_URL=http://127.0.0.1:11434/api --name open-webui --restart always ghcr.io/open-webui/open-webui:main
+```

 ### Other Installation Methods

 We offer various installation alternatives, including non-Docker methods, Docker Compose, Kustomize, and Helm. Visit our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/) or join our [Discord community](https://discord.gg/5rJgQTnV4s) for comprehensive guidance.

+### Troubleshooting
+
+Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).
+
 ### Keeping Your Docker Installation Up-to-Date

 In case you want to update your local Docker installation to the latest version, you can do it with [Watchtower](https://containrrr.dev/watchtower/):

+ 8 - 1
backend/.dockerignore

@@ -4,4 +4,11 @@ _old
 uploads
 .ipynb_checkpoints
 *.db
-_test
+_test
+!/data
+/data/*
+!/data/litellm
+/data/litellm/*
+!data/litellm/config.yaml
+
+!data/config.json

+ 6 - 1
backend/.gitignore

@@ -6,6 +6,11 @@ uploads
 *.db
 _test
 Pipfile
-data/*
+!/data
+/data/*
+!/data/litellm
+/data/litellm/*
+!data/litellm/config.yaml
+
 !data/config.json
 .webui_secret_key
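
The ignore entries above follow the usual whitelist pattern: `/data/*` ignores everything inside `data`, while the `!` negations re-include the directory itself plus `litellm/config.yaml` and `config.json`. A quick way to verify the behavior (the `.db` file name below is hypothetical):

```bash
# Prints the last matching pattern; the negation shows config.yaml is re-included
git check-ignore -v backend/data/litellm/config.yaml
# Matched by /data/litellm/*, so it stays ignored
git check-ignore -v backend/data/litellm/some-cache.db
```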

+ 36 - 4
backend/apps/images/main.py

@@ -35,6 +35,7 @@ app.add_middleware(
 app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
 app.state.ENABLED = app.state.AUTOMATIC1111_BASE_URL != ""
 app.state.IMAGE_SIZE = "512x512"
+app.state.IMAGE_STEPS = 50


 @app.get("/enabled", response_model=bool)
@@ -49,7 +50,7 @@ async def toggle_enabled(request: Request, user=Depends(get_admin_user)):
         app.state.ENABLED = not app.state.ENABLED
         return app.state.ENABLED
     except Exception as e:
-        raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))


 class UrlUpdateForm(BaseModel):
@@ -102,6 +103,32 @@ async def update_image_size(
         )


+class ImageStepsUpdateForm(BaseModel):
+    steps: int
+
+
+@app.get("/steps")
+async def get_image_steps(user=Depends(get_admin_user)):
+    return {"IMAGE_STEPS": app.state.IMAGE_STEPS}
+
+
+@app.post("/steps/update")
+async def update_image_steps(
+    form_data: ImageStepsUpdateForm, user=Depends(get_admin_user)
+):
+    if form_data.steps >= 0:
+        app.state.IMAGE_STEPS = form_data.steps
+        return {
+            "IMAGE_STEPS": app.state.IMAGE_STEPS,
+            "status": True,
+        }
+    else:
+        raise HTTPException(
+            status_code=400,
+            detail=ERROR_MESSAGES.INCORRECT_FORMAT("  (e.g., 50)."),
+        )
+
+
 @app.get("/models")
 def get_models(user=Depends(get_current_user)):
     try:
@@ -109,7 +136,8 @@ def get_models(user=Depends(get_current_user)):
         models = r.json()
         return models
     except Exception as e:
-        raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
+        app.state.ENABLED = False
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))


 @app.get("/models/default")
@@ -120,7 +148,8 @@ async def get_default_model(user=Depends(get_admin_user)):

         return {"model": options["sd_model_checkpoint"]}
     except Exception as e:
-        raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
+        app.state.ENABLED = False
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))


 class UpdateModelForm(BaseModel):
@@ -177,6 +206,9 @@ def generate_image(
             "height": height,
         }

+        if app.state.IMAGE_STEPS != None:
+            data["steps"] = app.state.IMAGE_STEPS
+
         if form_data.negative_prompt != None:
             data["negative_prompt"] = form_data.negative_prompt

@@ -190,4 +222,4 @@ def generate_image(
         return r.json()
     except Exception as e:
         print(e)
-        raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
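
The new steps setting is exposed through two small admin endpoints. Assuming the images router keeps its usual `/images/api/v1` mount prefix (the mount itself is not shown in this diff), they could be exercised like this:

```bash
TOKEN="<admin bearer token>"
# Read the current step count
curl -H "Authorization: Bearer $TOKEN" http://localhost:8080/images/api/v1/steps
# Update it (must be >= 0, per the validation above)
curl -X POST http://localhost:8080/images/api/v1/steps/update \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"steps": 30}'
```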

+ 36 - 2
backend/config.py

@@ -6,6 +6,8 @@ from bs4 import BeautifulSoup

 from pathlib import Path
 import json
+import yaml
+
 import markdown
 import requests
 import shutil
@@ -83,8 +85,6 @@ for version in soup.find_all("h2"):
     # Find the next sibling that is a h3 tag (section title)
     current = version.find_next_sibling()

-    print(current)
-
     while current and current.name != "h2":
         if current.name == "h3":
             section_title = current.get_text().lower()  # e.g., "added", "fixed"
@@ -165,6 +165,40 @@ Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)
 DOCS_DIR = f"{DATA_DIR}/docs"
 Path(DOCS_DIR).mkdir(parents=True, exist_ok=True)

+
+####################################
+# LITELLM_CONFIG
+####################################
+
+
+def create_config_file(file_path):
+    directory = os.path.dirname(file_path)
+
+    # Check if directory exists, if not, create it
+    if not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Data to write into the YAML file
+    config_data = {
+        "general_settings": {},
+        "litellm_settings": {},
+        "model_list": [],
+        "router_settings": {},
+    }
+
+    # Write data to YAML file
+    with open(file_path, "w") as file:
+        yaml.dump(config_data, file)
+
+
+LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"
+
+if not os.path.exists(LITELLM_CONFIG_PATH):
+    print("Config file doesn't exist. Creating...")
+    create_config_file(LITELLM_CONFIG_PATH)
+    print("Config file created successfully.")
+
+
 ####################################
 # OLLAMA_API_BASE_URL
 ####################################

+ 4 - 0
backend/data/litellm/config.yaml

@@ -0,0 +1,4 @@
+general_settings: {}
+litellm_settings: {}
+model_list: []
+router_settings: {}
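
This default file is an empty scaffold; pointing a model at a backend is done by filling `model_list`. A minimal sketch for routing an Ollama model through the proxy (model name and `api_base` are illustrative; `ollama/<name>` is LiteLLM's provider-prefix convention):

```bash
cat > /path/to/config.yaml <<'EOF'
general_settings: {}
litellm_settings: {}
model_list:
  - model_name: ollama-mistral
    litellm_params:
      model: ollama/mistral
      api_base: http://host.docker.internal:11434
router_settings: {}
EOF
```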

+ 45 - 3
backend/main.py

@@ -2,25 +2,31 @@ from bs4 import BeautifulSoup
 import json
 import markdown
 import time
+import os
+import sys
-
-from fastapi import FastAPI, Request
+from fastapi import FastAPI, Request, Depends
 from fastapi.staticfiles import StaticFiles
 from fastapi import HTTPException
+from fastapi.responses import JSONResponse
 from fastapi.middleware.wsgi import WSGIMiddleware
 from fastapi.middleware.cors import CORSMiddleware
 from starlette.exceptions import HTTPException as StarletteHTTPException


+from litellm.proxy.proxy_server import ProxyConfig, initialize
+from litellm.proxy.proxy_server import app as litellm_app
+
 from apps.ollama.main import app as ollama_app
 from apps.openai.main import app as openai_app
 from apps.audio.main import app as audio_app
 from apps.images.main import app as images_app
 from apps.rag.main import app as rag_app
-
 from apps.web.main import app as webui_app

+
 from config import WEBUI_NAME, ENV, VERSION, CHANGELOG, FRONTEND_BUILD_DIR
+from utils.utils import get_http_authorization_cred, get_current_user


 class SPAStaticFiles(StaticFiles):
@@ -34,6 +40,21 @@ class SPAStaticFiles(StaticFiles):
                 raise ex


+proxy_config = ProxyConfig()
+
+
+async def config():
+    router, model_list, general_settings = await proxy_config.load_config(
+        router=None, config_file_path="./data/litellm/config.yaml"
+    )
+
+    await initialize(config="./data/litellm/config.yaml", telemetry=False)
+
+
+async def startup():
+    await config()
+
+
 app = FastAPI(docs_url="/docs" if ENV == "dev" else None, redoc_url=None)

 origins = ["*"]
@@ -47,6 +68,11 @@ app.add_middleware(
 )


+@app.on_event("startup")
+async def on_startup():
+    await startup()
+
+
 @app.middleware("http")
 async def check_url(request: Request, call_next):
     start_time = int(time.time())
@@ -57,7 +83,23 @@ async def check_url(request: Request, call_next):
     return response


+@litellm_app.middleware("http")
+async def auth_middleware(request: Request, call_next):
+    auth_header = request.headers.get("Authorization", "")
+
+    if ENV != "dev":
+        try:
+            user = get_current_user(get_http_authorization_cred(auth_header))
+            print(user)
+        except Exception as e:
+            return JSONResponse(status_code=400, content={"detail": str(e)})
+
+    response = await call_next(request)
+    return response
+
+
 app.mount("/api/v1", webui_app)
+app.mount("/litellm/api", litellm_app)

 app.mount("/ollama/api", ollama_app)
 app.mount("/openai/api", openai_app)
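
With the proxy mounted at `/litellm/api` behind the new auth middleware, requests outside `dev` mode need a valid WebUI bearer token. A minimal check against the OpenAI-compatible model listing (illustrative):

```bash
curl -H "Authorization: Bearer $TOKEN" http://localhost:8080/litellm/api/v1/models
```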

+ 4 - 0
backend/requirements.txt

@@ -16,6 +16,10 @@ aiohttp
 peewee
 bcrypt

+litellm
+apscheduler
+google-generativeai
+
 langchain
 langchain-community
 chromadb

+ 8 - 0
backend/utils/utils.py

@@ -58,6 +58,14 @@ def extract_token_from_auth_header(auth_header: str):
     return auth_header[len("Bearer ") :]


+def get_http_authorization_cred(auth_header: str):
+    try:
+        scheme, credentials = auth_header.split(" ")
+        return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
+    except:
+        raise ValueError(ERROR_MESSAGES.INVALID_TOKEN)
+
+
 def get_current_user(
     auth_token: HTTPAuthorizationCredentials = Depends(bearer_security),
 ):

+ 1 - 1
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.1.102",
+	"version": "0.1.103",
 	"private": true,
 	"scripts": {
 		"dev": "vite dev --host",

+ 1 - 0
src/app.html

@@ -5,6 +5,7 @@
 		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
 		<link rel="manifest" href="%sveltekit.assets%/manifest.json" />
 		<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
+		<meta name="robots" content="noindex,nofollow" />
 		<script>
 			// On page load or when changing themes, best to add inline in `head` to avoid FOUC
 			if (

+ 65 - 0
src/lib/apis/images/index.ts

@@ -198,6 +198,71 @@ export const updateImageSize = async (token: string = '', size: string) => {
 	return res.IMAGE_SIZE;
 };

+export const getImageSteps = async (token: string = '') => {
+	let error = null;
+
+	const res = await fetch(`${IMAGES_API_BASE_URL}/steps`, {
+		method: 'GET',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			if ('detail' in err) {
+				error = err.detail;
+			} else {
+				error = 'Server connection failed';
+			}
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res.IMAGE_STEPS;
+};
+
+export const updateImageSteps = async (token: string = '', steps: number) => {
+	let error = null;
+
+	const res = await fetch(`${IMAGES_API_BASE_URL}/steps/update`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		},
+		body: JSON.stringify({ steps })
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			if ('detail' in err) {
+				error = err.detail;
+			} else {
+				error = 'Server connection failed';
+			}
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res.IMAGE_STEPS;
+};
+
 export const getDiffusionModels = async (token: string = '') => {
 	let error = null;


+ 148 - 0
src/lib/apis/litellm/index.ts

@@ -0,0 +1,148 @@
+import { LITELLM_API_BASE_URL } from '$lib/constants';
+
+export const getLiteLLMModels = async (token: string = '') => {
+	let error = null;
+
+	const res = await fetch(`${LITELLM_API_BASE_URL}/v1/models`, {
+		method: 'GET',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
+			return [];
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	const models = Array.isArray(res) ? res : res?.data ?? null;
+
+	return models
+		? models
+				.map((model) => ({
+					id: model.id,
+					name: model.name ?? model.id,
+					external: true,
+					source: 'litellm'
+				}))
+				.sort((a, b) => {
+					return a.name.localeCompare(b.name);
+				})
+		: models;
+};
+
+export const getLiteLLMModelInfo = async (token: string = '') => {
+	let error = null;
+
+	const res = await fetch(`${LITELLM_API_BASE_URL}/model/info`, {
+		method: 'GET',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
+			return [];
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	const models = Array.isArray(res) ? res : res?.data ?? null;
+
+	return models;
+};
+
+type AddLiteLLMModelForm = {
+	name: string;
+	model: string;
+	api_base: string;
+	api_key: string;
+	rpm: string;
+};
+
+export const addLiteLLMModel = async (token: string = '', payload: AddLiteLLMModelForm) => {
+	let error = null;
+
+	const res = await fetch(`${LITELLM_API_BASE_URL}/model/new`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		},
+		body: JSON.stringify({
+			model_name: payload.name,
+			litellm_params: {
+				model: payload.model,
+				...(payload.api_base === '' ? {} : { api_base: payload.api_base }),
+				...(payload.api_key === '' ? {} : { api_key: payload.api_key }),
+				...(isNaN(parseInt(payload.rpm)) ? {} : { rpm: parseInt(payload.rpm) })
+			}
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
+			return [];
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const deleteLiteLLMModel = async (token: string = '', id: string) => {
+	let error = null;
+
+	const res = await fetch(`${LITELLM_API_BASE_URL}/model/delete`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			...(token && { authorization: `Bearer ${token}` })
+		},
+		body: JSON.stringify({
+			id: id
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
+			return [];
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
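
These client functions wrap the LiteLLM proxy's model-management routes through the `/litellm/api` mount. The raw equivalent of `addLiteLLMModel` would look roughly like this (model names and values are placeholders):

```bash
curl -X POST http://localhost:8080/litellm/api/model/new \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"model_name": "my-model", "litellm_params": {"model": "gpt-3.5-turbo", "rpm": 60}}'
```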

+ 5 - 3
src/lib/apis/ollama/index.ts

@@ -128,9 +128,11 @@ export const getOllamaModels = async (token: string = '') => {
 		throw error;
 	}

-	return (res?.models ?? []).sort((a, b) => {
-		return a.name.localeCompare(b.name);
-	});
+	return (res?.models ?? [])
+		.map((model) => ({ id: model.model, name: model.name ?? model.model, ...model }))
+		.sort((a, b) => {
+			return a.name.localeCompare(b.name);
+		});
 };

 // TODO: migrate to backend

+ 8 - 4
src/lib/apis/openai/index.ts

@@ -163,7 +163,7 @@ export const getOpenAIModels = async (token: string = '') => {

 	return models
 		? models
-				.map((model) => ({ name: model.id, external: true }))
+				.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
 				.sort((a, b) => {
 					return a.name.localeCompare(b.name);
 				})
@@ -200,17 +200,21 @@ export const getOpenAIModelsDirect = async (
 	const models = Array.isArray(res) ? res : res?.data ?? null;

 	return models
-		.map((model) => ({ name: model.id, external: true }))
+		.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
 		.filter((model) => (base_url.includes('openai') ? model.name.includes('gpt') : true))
 		.sort((a, b) => {
 			return a.name.localeCompare(b.name);
 		});
 };

-export const generateOpenAIChatCompletion = async (token: string = '', body: object) => {
+export const generateOpenAIChatCompletion = async (
+	token: string = '',
+	body: object,
+	url: string = OPENAI_API_BASE_URL
+) => {
 	let error = null;

-	const res = await fetch(`${OPENAI_API_BASE_URL}/chat/completions`, {
+	const res = await fetch(`${url}/chat/completions`, {
 		method: 'POST',
 		headers: {
 			Authorization: `Bearer ${token}`,
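
Since `generateOpenAIChatCompletion` now takes a base URL, the same OpenAI-style chat route can be pointed at the embedded LiteLLM proxy. The raw equivalent (model name hypothetical):

```bash
curl -X POST http://localhost:8080/litellm/api/v1/chat/completions \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"model": "my-model", "messages": [{"role": "user", "content": "Hello!"}]}'
```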

+ 2 - 2
src/lib/components/chat/ModelSelector.svelte

@@ -25,7 +25,7 @@

 	$: if (selectedModels.length > 0 && $models.length > 0) {
 		selectedModels = selectedModels.map((model) =>
-			$models.map((m) => m.name).includes(model) ? model : ''
+			$models.map((m) => m.id).includes(model) ? model : ''
 		);
 	}
 </script>
@@ -45,7 +45,7 @@
 					{#if model.name === 'hr'}
 						<hr />
 					{:else}
-						<option value={model.name} class="text-gray-700 text-lg"
+						<option value={model.id} class="text-gray-700 text-lg"
 							>{model.name +
 								`${model.size ? ` (${(model.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}</option
 						>

+ 9 - 7
src/lib/components/chat/Settings/About.svelte

@@ -38,16 +38,18 @@
 			</div>
 		</div>

-		<hr class=" dark:border-gray-700" />
+		{#if ollamaVersion}
+			<hr class=" dark:border-gray-700" />
-		<div>
-			<div class=" mb-2.5 text-sm font-medium">Ollama Version</div>
-			<div class="flex w-full">
-				<div class="flex-1 text-xs text-gray-700 dark:text-gray-200">
-					{ollamaVersion ?? 'N/A'}
+			<div>
+				<div class=" mb-2.5 text-sm font-medium">Ollama Version</div>
+				<div class="flex w-full">
+					<div class="flex-1 text-xs text-gray-700 dark:text-gray-200">
+						{ollamaVersion ?? 'N/A'}
+					</div>
 				</div>
 				</div>
 			</div>
 			</div>
-		</div>
+		{/if}

 		<hr class=" dark:border-gray-700" />


+ 93 - 67
src/lib/components/chat/Settings/Connections.svelte

@@ -3,7 +3,7 @@
 	import { createEventDispatcher, onMount } from 'svelte';
 	const dispatch = createEventDispatcher();

-	import { getOllamaAPIUrl, updateOllamaAPIUrl } from '$lib/apis/ollama';
+	import { getOllamaAPIUrl, getOllamaVersion, updateOllamaAPIUrl } from '$lib/apis/ollama';
 	import { getOpenAIKey, getOpenAIUrl, updateOpenAIKey, updateOpenAIUrl } from '$lib/apis/openai';
 	import toast from 'svelte-french-toast';

@@ -15,6 +15,9 @@
 	let OPENAI_API_KEY = '';
 	let OPENAI_API_BASE_URL = '';

+	let showOpenAI = false;
+	let showLiteLLM = false;
+
 	const updateOpenAIHandler = async () => {
 		OPENAI_API_BASE_URL = await updateOpenAIUrl(localStorage.token, OPENAI_API_BASE_URL);
 		OPENAI_API_KEY = await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
@@ -24,11 +27,15 @@

 	const updateOllamaAPIUrlHandler = async () => {
 		API_BASE_URL = await updateOllamaAPIUrl(localStorage.token, API_BASE_URL);
-		const _models = await getModels('ollama');
-		if (_models.length > 0) {
+		const ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => {
+			toast.error(error);
+			return null;
+		});
+
+		if (ollamaVersion) {
 			toast.success('Server connection verified');
-			await models.set(_models);
+			await models.set(await getModels());
 		}
 	};

@@ -42,7 +49,7 @@
 </script>

 <form
-	class="flex flex-col h-full space-y-3 text-sm"
+	class="flex flex-col h-full justify-between text-sm"
 	on:submit|preventDefault={() => {
 		updateOpenAIHandler();
 		dispatch('save');
@@ -53,81 +60,100 @@
 		// });
 	}}
 >
-	<div>
-		<div class=" mb-2.5 text-sm font-medium">Ollama API URL</div>
-		<div class="flex w-full">
-			<div class="flex-1 mr-2">
-				<input
-					class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
-					placeholder="Enter URL (e.g. http://localhost:11434/api)"
-					bind:value={API_BASE_URL}
-				/>
-			</div>
-			<button
-				class="px-3 bg-gray-200 hover:bg-gray-300 dark:bg-gray-600 dark:hover:bg-gray-700 rounded transition"
-				on:click={() => {
-					updateOllamaAPIUrlHandler();
-				}}
-				type="button"
-			>
-				<svg
-					xmlns="http://www.w3.org/2000/svg"
-					viewBox="0 0 20 20"
-					fill="currentColor"
-					class="w-4 h-4"
-				>
-					<path
-						fill-rule="evenodd"
-						d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
-						clip-rule="evenodd"
-					/>
-				</svg>
-			</button>
-		</div>
+	<div class="  pr-1.5 overflow-y-scroll max-h-[20.5rem] space-y-3">
+		<div class=" space-y-3">
+			<div class="mt-2 space-y-2 pr-1.5">
+				<div class="flex justify-between items-center text-sm">
+					<div class="  font-medium">OpenAI API</div>
+					<button
+						class=" text-xs font-medium text-gray-500"
+						type="button"
+						on:click={() => {
+							showOpenAI = !showOpenAI;
+						}}>{showOpenAI ? 'Hide' : 'Show'}</button
+					>
+				</div>
-		<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
-			Trouble accessing Ollama?
-			<a
-				class=" text-gray-300 font-medium"
-				href="https://github.com/open-webui/open-webui#troubleshooting"
-				target="_blank"
-			>
-				Click here for help.
-			</a>
+				{#if showOpenAI}
+					<div>
+						<div class=" mb-2.5 text-sm font-medium">API Key</div>
+						<div class="flex w-full">
+							<div class="flex-1">
+								<input
+									class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+									placeholder="Enter OpenAI API Key"
+									bind:value={OPENAI_API_KEY}
+									autocomplete="off"
+								/>
+							</div>
+						</div>
+					</div>
+
+					<div>
+						<div class=" mb-2.5 text-sm font-medium">API Base URL</div>
+						<div class="flex w-full">
+							<div class="flex-1">
+								<input
+									class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+									placeholder="Enter OpenAI API Base URL"
+									bind:value={OPENAI_API_BASE_URL}
+									autocomplete="off"
+								/>
+							</div>
+						</div>
+						<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
+							WebUI will make requests to <span class=" text-gray-200"
+								>'{OPENAI_API_BASE_URL}/chat'</span
+							>
+						</div>
+					</div>
+				{/if}
+			</div>
 		</div>
 		</div>
-	</div>
 
 
-	<hr class=" dark:border-gray-700" />
 
-	<div class=" space-y-3">
-			<div class=" mb-2.5 text-sm font-medium">OpenAI API Key</div>
+			<div class=" mb-2.5 text-sm font-medium">Ollama API URL</div>
 			<div class="flex w-full">
 			<div class="flex w-full">
-				<div class="flex-1">
 			<div class="flex w-full">
 					<input
 						class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
 						class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
-						placeholder="Enter OpenAI API Key"
-						bind:value={OPENAI_API_KEY}
-						autocomplete="off"
+						placeholder="Enter URL (e.g. http://localhost:11434/api)"
+						bind:value={API_BASE_URL}
 					/>
 					/>
 				</div>
 				</div>
+				<button
+					class="px-3 bg-gray-200 hover:bg-gray-300 dark:bg-gray-600 dark:hover:bg-gray-700 rounded transition"
+					on:click={() => {
+						updateOllamaAPIUrlHandler();
+					}}
+					type="button"
+				>
+					<svg
+						xmlns="http://www.w3.org/2000/svg"
+						viewBox="0 0 20 20"
+						fill="currentColor"
+						class="w-4 h-4"
+					>
+						<path
+							fill-rule="evenodd"
+							d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
+							clip-rule="evenodd"
+						/>
+					</svg>
+				</button>
 			</div>
 			</div>
 
-			<div class=" mb-2.5 text-sm font-medium">OpenAI API Base URL</div>
-			<div class="flex w-full">
-				<div class="flex-1">
-					<input
-						class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
-						placeholder="Enter OpenAI API Base URL"
-						bind:value={OPENAI_API_BASE_URL}
-						autocomplete="off"
-					/>
-				</div>
-			</div>
 			<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
 			<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
+				Trouble accessing Ollama?
+				<a
+					class=" text-gray-300 font-medium"
+					href="https://github.com/open-webui/open-webui#troubleshooting"
+					target="_blank"
+				>
+					Click here for help.
+				</a>
 			</div>
 			</div>
 		</div>
 	</div>
+ 1 - 1
src/lib/components/chat/Settings/General.svelte

@@ -84,7 +84,7 @@
 </script>

 <div class="flex flex-col h-full justify-between text-sm">
-	<div class="  pr-1.5 overflow-y-scroll max-h-[21rem]">
+	<div class="  pr-1.5 overflow-y-scroll max-h-[20.5rem]">
 		<div class="">
 			<div class=" mb-1 text-sm font-medium">WebUI Settings</div>


+ 29 - 6
src/lib/components/chat/Settings/Images.svelte

@@ -12,7 +12,9 @@
 		toggleImageGenerationEnabledStatus,
 		updateAUTOMATIC1111Url,
 		updateDefaultDiffusionModel,
-		updateImageSize
+		updateImageSize,
+		getImageSteps,
+		updateImageSteps
 	} from '$lib/apis/images';
 	import { getBackendConfig } from '$lib/apis';
 	const dispatch = createEventDispatcher();
@@ -21,20 +23,23 @@

 	let loading = false;

-	let enableImageGeneration = true;
+	let enableImageGeneration = false;
 	let AUTOMATIC1111_BASE_URL = '';

 	let selectedModel = '';
-	let models = [];
+	let models = null;

 	let imageSize = '';
+	let steps = 50;

 	const getModels = async () => {
 		models = await getDiffusionModels(localStorage.token).catch((error) => {
 			toast.error(error);
 			return null;
 		});
-		selectedModel = await getDefaultDiffusionModel(localStorage.token);
+		selectedModel = await getDefaultDiffusionModel(localStorage.token).catch((error) => {
+			return '';
+		});
 	};

 	const updateAUTOMATIC1111UrlHandler = async () => {
@@ -83,6 +88,7 @@

 			if (enableImageGeneration && AUTOMATIC1111_BASE_URL) {
 				imageSize = await getImageSize(localStorage.token);
+				steps = await getImageSteps(localStorage.token);
 				getModels();
 			}
 		}
@@ -98,12 +104,16 @@
 			toast.error(error);
 			return null;
 		});
+		await updateImageSteps(localStorage.token, steps).catch((error) => {
+			toast.error(error);
+			return null;
+		});

 		dispatch('save');
 		loading = false;
 	}}
 >
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[21rem]">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[20.5rem]">
 		<div>
 			<div class=" mb-1 text-sm font-medium">Image Settings</div>

@@ -188,7 +198,7 @@
 							{#if !selectedModel}
 								<option value="" disabled selected>Select a model</option>
 							{/if}
-							{#each models as model}
+							{#each models ?? [] as model}
 								<option value={model.title} class="bg-gray-100 dark:bg-gray-700"
 									>{model.model_name}</option
 								>
@@ -210,6 +220,19 @@
 					</div>
 				</div>
 			</div>
+
+			<div>
+				<div class=" mb-2.5 text-sm font-medium">Set Steps</div>
+				<div class="flex w-full">
+					<div class="flex-1 mr-2">
+						<input
+							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+							placeholder="Enter Number of Steps (e.g. 50)"
+							bind:value={steps}
+						/>
+					</div>
+				</div>
+			</div>
 		{/if}
 	</div>


+ 596 - 265
src/lib/components/chat/Settings/Models.svelte

@@ -2,14 +2,33 @@
 	import queue from 'async/queue';
 	import toast from 'svelte-french-toast';

-	import { createModel, deleteModel, pullModel } from '$lib/apis/ollama';
+	import { createModel, deleteModel, getOllamaVersion, pullModel } from '$lib/apis/ollama';
 	import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
 	import { WEBUI_NAME, models, user } from '$lib/stores';
 	import { splitStream } from '$lib/utils';
+	import { onMount } from 'svelte';
+	import { addLiteLLMModel, deleteLiteLLMModel, getLiteLLMModelInfo } from '$lib/apis/litellm';

 	export let getModels: Function;

+	let showLiteLLM = false;
+	let showLiteLLMParams = false;
+
+	let liteLLMModelInfo = [];
+
+	let liteLLMModel = '';
+	let liteLLMModelName = '';
+	let liteLLMAPIBase = '';
+	let liteLLMAPIKey = '';
+	let liteLLMRPM = '';
+
+	let deleteLiteLLMModelId = '';
+
+	$: liteLLMModelName = liteLLMModel;
+
 	// Models
+	let showExperimentalOllama = false;
+	let ollamaVersion = '';
 	const MAX_PARALLEL_DOWNLOADS = 3;
 	const modelDownloadQueue = queue(
 		(task: { modelName: string }, cb) =>
@@ -286,311 +305,623 @@
 			opts.callback({ success: true, modelName: opts.modelName });
 		}
 	};
+
+	const addLiteLLMModelHandler = async () => {
+		if (!liteLLMModelInfo.find((info) => info.model_name === liteLLMModelName)) {
+			const res = await addLiteLLMModel(localStorage.token, {
+				name: liteLLMModelName,
+				model: liteLLMModel,
+				api_base: liteLLMAPIBase,
+				api_key: liteLLMAPIKey,
+				rpm: liteLLMRPM
+			}).catch((error) => {
+				toast.error(error);
+				return null;
+			});
+
+			if (res) {
+				if (res.message) {
+					toast.success(res.message);
+				}
+			}
+		} else {
+			toast.error(`Model ${liteLLMModelName} already exists.`);
+		}
+
+		liteLLMModelName = '';
+		liteLLMModel = '';
+		liteLLMAPIBase = '';
+		liteLLMAPIKey = '';
+		liteLLMRPM = '';
+
+		liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
+		models.set(await getModels());
+	};
+
+	const deleteLiteLLMModelHandler = async () => {
+		const res = await deleteLiteLLMModel(localStorage.token, deleteLiteLLMModelId).catch(
+			(error) => {
+				toast.error(error);
+				return null;
+			}
+		);
+
+		if (res) {
+			if (res.message) {
+				toast.success(res.message);
+			}
+		}
+
+		deleteLiteLLMModelId = '';
+		liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
+		models.set(await getModels());
+	};
+
+	onMount(async () => {
+		ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
+		liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
+	});
 </script>
 </script>

 <div class="flex flex-col h-full justify-between text-sm">
-		<div>
-			<div class=" mb-2.5 text-sm font-medium">Pull a model from Ollama.com</div>
-			<div class="flex w-full">
-				<div class="flex-1 mr-2">
-					<input
-						class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
-						placeholder="Enter model tag (e.g. mistral:7b)"
-						bind:value={modelTag}
-					/>
-				</div>
-				<button
-					class="px-3 text-gray-100 bg-emerald-600 hover:bg-emerald-700 disabled:bg-gray-700 disabled:cursor-not-allowed rounded transition"
-					on:click={() => {
-						pullModelHandler();
-					}}
-					disabled={modelTransferring}
-				>
-					{#if modelTransferring}
-						<div class="self-center">
-							<svg
-								class=" w-4 h-4"
-								viewBox="0 0 24 24"
-								fill="currentColor"
-								xmlns="http://www.w3.org/2000/svg"
-								><style>
-									.spinner_ajPY {
-										transform-origin: center;
-										animation: spinner_AtaB 0.75s infinite linear;
-									}
-									@keyframes spinner_AtaB {
-										100% {
-											transform: rotate(360deg);
-										}
-									}
-								</style><path
-									d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
-									opacity=".25"
-								/><path
-									d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
-									class="spinner_ajPY"
-								/></svg
-							>
-						</div>
-					{:else}
-						<svg
-							xmlns="http://www.w3.org/2000/svg"
-							viewBox="0 0 16 16"
-							fill="currentColor"
-							class="w-4 h-4"
-						>
-							<path
-								d="M8.75 2.75a.75.75 0 0 0-1.5 0v5.69L5.03 6.22a.75.75 0 0 0-1.06 1.06l3.5 3.5a.75.75 0 0 0 1.06 0l3.5-3.5a.75.75 0 0 0-1.06-1.06L8.75 8.44V2.75Z"
-							/>
-							<path
-								d="M3.5 9.75a.75.75 0 0 0-1.5 0v1.5A2.75 2.75 0 0 0 4.75 14h6.5A2.75 2.75 0 0 0 14 11.25v-1.5a.75.75 0 0 0-1.5 0v1.5c0 .69-.56 1.25-1.25 1.25h-6.5c-.69 0-1.25-.56-1.25-1.25v-1.5Z"
-							/>
-						</svg>
-					{/if}
-				</button>
-			</div>
-
-			<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
-				To access the available model names for downloading, <a
-					class=" text-gray-500 dark:text-gray-300 font-medium"
-					href="https://ollama.com/library"
-					target="_blank">click here.</a
-				>
-			</div>
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll h-[23rem]">
+		{#if ollamaVersion}
+			<div class="space-y-2 pr-1.5">
+				<div>
+					<div class=" mb-2 text-sm font-medium">Manage Ollama Models</div>
-			{#if Object.keys(modelDownloadStatus).length > 0}
-				{#each Object.keys(modelDownloadStatus) as model}
-					<div class="flex flex-col">
-						<div class="font-medium mb-1">{model}</div>
-						<div class="">
-							<div
-								class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
-								style="width: {Math.max(15, modelDownloadStatus[model].pullProgress ?? 0)}%"
-							>
-								{modelDownloadStatus[model].pullProgress ?? 0}%
-							</div>
-							<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
-								{modelDownloadStatus[model].digest}
-							</div>
+					<div class=" mb-2 text-sm font-medium">Pull a model from Ollama.com</div>
+					<div class="flex w-full">
+						<div class="flex-1 mr-2">
+							<input
+								class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+								placeholder="Enter model tag (e.g. mistral:7b)"
+								bind:value={modelTag}
+							/>
 						</div>
+						<button
+							class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
+							on:click={() => {
+								pullModelHandler();
+							}}
+							disabled={modelTransferring}
+						>
+							{#if modelTransferring}
+								<div class="self-center">
+									<svg
+										class=" w-4 h-4"
+										viewBox="0 0 24 24"
+										fill="currentColor"
+										xmlns="http://www.w3.org/2000/svg"
+										><style>
+											.spinner_ajPY {
+												transform-origin: center;
+												animation: spinner_AtaB 0.75s infinite linear;
+											}
+											@keyframes spinner_AtaB {
+												100% {
+													transform: rotate(360deg);
+												}
+											}
+										</style><path
+											d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
+											opacity=".25"
+										/><path
+											d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
+											class="spinner_ajPY"
+										/></svg
+									>
+								</div>
+							{:else}
+								<svg
+									xmlns="http://www.w3.org/2000/svg"
+									viewBox="0 0 16 16"
+									fill="currentColor"
+									class="w-4 h-4"
+								>
+									<path
+										d="M8.75 2.75a.75.75 0 0 0-1.5 0v5.69L5.03 6.22a.75.75 0 0 0-1.06 1.06l3.5 3.5a.75.75 0 0 0 1.06 0l3.5-3.5a.75.75 0 0 0-1.06-1.06L8.75 8.44V2.75Z"
+									/>
+									<path
+										d="M3.5 9.75a.75.75 0 0 0-1.5 0v1.5A2.75 2.75 0 0 0 4.75 14h6.5A2.75 2.75 0 0 0 14 11.25v-1.5a.75.75 0 0 0-1.5 0v1.5c0 .69-.56 1.25-1.25 1.25h-6.5c-.69 0-1.25-.56-1.25-1.25v-1.5Z"
+									/>
+								</svg>
+							{/if}
+						</button>
 					</div>
-				{/each}
-			{/if}
-		</div>
-		<hr class=" dark:border-gray-700" />
+					<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
+						To access the available model names for downloading, <a
+							class=" text-gray-500 dark:text-gray-300 font-medium"
+							href="https://ollama.com/library"
+							target="_blank">click here.</a
+						>
+					</div>
-		<div>
-			<div class=" mb-2.5 text-sm font-medium">Delete a model</div>
-			<div class="flex w-full">
-				<div class="flex-1 mr-2">
-					<select
-						class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
-						bind:value={deleteModelTag}
-						placeholder="Select a model"
-					>
-						{#if !deleteModelTag}
-							<option value="" disabled selected>Select a model</option>
-						{/if}
-						{#each $models.filter((m) => m.size != null) as model}
-							<option value={model.name} class="bg-gray-100 dark:bg-gray-700"
-								>{model.name + ' (' + (model.size / 1024 ** 3).toFixed(1) + ' GB)'}</option
-							>
+					{#if Object.keys(modelDownloadStatus).length > 0}
+						{#each Object.keys(modelDownloadStatus) as model}
+							<div class="flex flex-col">
+								<div class="font-medium mb-1">{model}</div>
+								<div class="">
+									<div
+										class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
+										style="width: {Math.max(15, modelDownloadStatus[model].pullProgress ?? 0)}%"
+									>
+										{modelDownloadStatus[model].pullProgress ?? 0}%
+									</div>
+									<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
+										{modelDownloadStatus[model].digest}
+									</div>
+								</div>
+							</div>
 						{/each}
-					</select>
+					{/if}
 				</div>
-				<button
-					class="px-3 bg-red-700 hover:bg-red-800 text-gray-100 rounded transition"
-					on:click={() => {
-						deleteModelHandler();
-					}}
-				>
-					<svg
-						xmlns="http://www.w3.org/2000/svg"
-						viewBox="0 0 16 16"
-						fill="currentColor"
-						class="w-4 h-4"
-					>
-						<path
-							fill-rule="evenodd"
-							d="M5 3.25V4H2.75a.75.75 0 0 0 0 1.5h.3l.815 8.15A1.5 1.5 0 0 0 5.357 15h5.285a1.5 1.5 0 0 0 1.493-1.35l.815-8.15h.3a.75.75 0 0 0 0-1.5H11v-.75A2.25 2.25 0 0 0 8.75 1h-1.5A2.25 2.25 0 0 0 5 3.25Zm2.25-.75a.75.75 0 0 0-.75.75V4h3v-.75a.75.75 0 0 0-.75-.75h-1.5ZM6.05 6a.75.75 0 0 1 .787.713l.275 5.5a.75.75 0 0 1-1.498.075l-.275-5.5A.75.75 0 0 1 6.05 6Zm3.9 0a.75.75 0 0 1 .712.787l-.275 5.5a.75.75 0 0 1-1.498-.075l.275-5.5a.75.75 0 0 1 .786-.711Z"
-							clip-rule="evenodd"
-						/>
-					</svg>
-				</button>
-			</div>
-		</div>
-		<hr class=" dark:border-gray-700" />
-
-		<form
-			on:submit|preventDefault={() => {
-				uploadModelHandler();
-			}}
-		>
-			<div class=" mb-2 flex w-full justify-between">
-				<div class="  text-sm font-medium">
-					Upload a GGUF model <a
-						class=" text-xs font-medium text-gray-500 underline"
-						href="https://github.com/jmorganca/ollama/blob/main/README.md#import-from-gguf"
-						target="_blank">(Experimental)</a
-					>
+				<div>
+					<div class=" mb-2 text-sm font-medium">Delete a model</div>
+					<div class="flex w-full">
+						<div class="flex-1 mr-2">
+							<select
+								class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+								bind:value={deleteModelTag}
+								placeholder="Select a model"
+							>
+								{#if !deleteModelTag}
+									<option value="" disabled selected>Select a model</option>
+								{/if}
+								{#each $models.filter((m) => m.size != null) as model}
+									<option value={model.name} class="bg-gray-100 dark:bg-gray-700"
+										>{model.name + ' (' + (model.size / 1024 ** 3).toFixed(1) + ' GB)'}</option
+									>
+								{/each}
+							</select>
+						</div>
+						<button
+							class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
+							on:click={() => {
+								deleteModelHandler();
+							}}
+						>
+							<svg
+								xmlns="http://www.w3.org/2000/svg"
+								viewBox="0 0 16 16"
+								fill="currentColor"
+								class="w-4 h-4"
+							>
+								<path
+									fill-rule="evenodd"
+									d="M5 3.25V4H2.75a.75.75 0 0 0 0 1.5h.3l.815 8.15A1.5 1.5 0 0 0 5.357 15h5.285a1.5 1.5 0 0 0 1.493-1.35l.815-8.15h.3a.75.75 0 0 0 0-1.5H11v-.75A2.25 2.25 0 0 0 8.75 1h-1.5A2.25 2.25 0 0 0 5 3.25Zm2.25-.75a.75.75 0 0 0-.75.75V4h3v-.75a.75.75 0 0 0-.75-.75h-1.5ZM6.05 6a.75.75 0 0 1 .787.713l.275 5.5a.75.75 0 0 1-1.498.075l-.275-5.5A.75.75 0 0 1 6.05 6Zm3.9 0a.75.75 0 0 1 .712.787l-.275 5.5a.75.75 0 0 1-1.498-.075l.275-5.5a.75.75 0 0 1 .786-.711Z"
+									clip-rule="evenodd"
+								/>
+							</svg>
+						</button>
+					</div>
 				</div>
-				<button
-					class="p-1 px-3 text-xs flex rounded transition"
-					on:click={() => {
-						if (modelUploadMode === 'file') {
-							modelUploadMode = 'url';
-						} else {
-							modelUploadMode = 'file';
-						}
-					}}
-					type="button"
-				>
-					{#if modelUploadMode === 'file'}
-						<span class="ml-2 self-center">File Mode</span>
-					{:else}
-						<span class="ml-2 self-center">URL Mode</span>
-					{/if}
-				</button>
-			</div>
+				<div>
+					<div class="flex justify-between items-center text-xs">
+						<div class=" text-sm font-medium">Experimental</div>
+						<button
+							class=" text-xs font-medium text-gray-500"
+							type="button"
+							on:click={() => {
+								showExperimentalOllama = !showExperimentalOllama;
+							}}>{showExperimentalOllama ? 'Show' : 'Hide'}</button
+						>
+					</div>
+				</div>
 
-				<div class="flex flex-col w-full">
-					{#if modelUploadMode === 'file'}
-						<div class="flex-1 {modelInputFile && modelInputFile.length > 0 ? 'mr-2' : ''}">
-							<input
-								id="model-upload-input"
-								type="file"
-								bind:files={modelInputFile}
-								on:change={() => {
-									console.log(modelInputFile);
-								}}
-								accept=".gguf"
-								required
-								hidden
-							/>
+				{#if showExperimentalOllama}
+					<form
+						on:submit|preventDefault={() => {
+							uploadModelHandler();
+						}}
+					>
+						<div class=" mb-2 flex w-full justify-between">
+							<div class="  text-sm font-medium">Upload a GGUF model</div>
 
 							<button
-								class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-800"
+								class="p-1 px-3 text-xs flex rounded transition"
 								on:click={() => {
 								on:click={() => {
+									if (modelUploadMode === 'file') {
+										modelUploadMode = 'url';
+									} else {
+										modelUploadMode = 'file';
+									}
 								}}
 								}}
 							>
 							>
-									{modelInputFile[0].name}
+								{#if modelUploadMode === 'file'}
+									<span class="ml-2 self-center">File Mode</span>
 								{:else}
 								{:else}
+									<span class="ml-2 self-center">URL Mode</span>
 								{/if}
 								{/if}
 							</button>
 						</div>
-						<div class="flex-1 {modelFileUrl !== '' ? 'mr-2' : ''}">
-							<input
-								class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-800 outline-none {modelFileUrl !==
-								''
-									? 'mr-2'
-									: ''}"
-								type="url"
-								required
-								bind:value={modelFileUrl}
-								placeholder="Type HuggingFace Resolve (Download) URL"
-							/>
-						</div>
-					{/if}
-				</div>
 
-					<button
-						class="px-3 text-gray-100 bg-emerald-600 hover:bg-emerald-700 disabled:bg-gray-700 disabled:cursor-not-allowed rounded transition"
-						type="submit"
-						disabled={modelTransferring}
-					>
-						{#if modelTransferring}
-							<div class="self-center">
-								<svg
-									class=" w-4 h-4"
-									viewBox="0 0 24 24"
-									fill="currentColor"
-									xmlns="http://www.w3.org/2000/svg"
-									><style>
-										.spinner_ajPY {
-											transform-origin: center;
-											animation: spinner_AtaB 0.75s infinite linear;
-										}
-										@keyframes spinner_AtaB {
-											100% {
-												transform: rotate(360deg);
-											}
-										}
-									</style><path
-										d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
-										opacity=".25"
-									/><path
-										d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
-										class="spinner_ajPY"
-									/></svg
+						<div class="flex w-full mb-1.5">
+							<div class="flex flex-col w-full">
+								{#if modelUploadMode === 'file'}
+									<div class="flex-1 {modelInputFile && modelInputFile.length > 0 ? 'mr-2' : ''}">
+										<input
+											id="model-upload-input"
+											type="file"
+											bind:files={modelInputFile}
+											on:change={() => {
+												console.log(modelInputFile);
+											}}
+											accept=".gguf"
+											required
+											hidden
+										/>
+
+										<button
+											type="button"
+											class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-850"
+											on:click={() => {
+												document.getElementById('model-upload-input').click();
+											}}
+										>
+											{#if modelInputFile && modelInputFile.length > 0}
+												{modelInputFile[0].name}
+											{:else}
+												Click here to select
+											{/if}
+										</button>
+									</div>
+								{:else}
+									<div class="flex-1 {modelFileUrl !== '' ? 'mr-2' : ''}">
+										<input
+											class="w-full rounded text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-850 outline-none {modelFileUrl !==
+											''
+												? 'mr-2'
+												: ''}"
+											type="url"
+											required
+											bind:value={modelFileUrl}
+											placeholder="Type HuggingFace Resolve (Download) URL"
+										/>
+									</div>
+								{/if}
+							</div>
+
+							{#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')}
+								<button
+									class="px-3 text-gray-100 bg-emerald-600 hover:bg-emerald-700 disabled:bg-gray-700 disabled:cursor-not-allowed rounded transition"
+									type="submit"
+									disabled={modelTransferring}
 								>
+									{#if modelTransferring}
+										<div class="self-center">
+											<svg
+												class=" w-4 h-4"
+												viewBox="0 0 24 24"
+												fill="currentColor"
+												xmlns="http://www.w3.org/2000/svg"
+												><style>
+													.spinner_ajPY {
+														transform-origin: center;
+														animation: spinner_AtaB 0.75s infinite linear;
+													}
+													@keyframes spinner_AtaB {
+														100% {
+															transform: rotate(360deg);
+														}
+													}
+												</style><path
+													d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
+													opacity=".25"
+												/><path
+													d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
+													class="spinner_ajPY"
+												/></svg
+											>
+										</div>
+									{:else}
+										<svg
+											xmlns="http://www.w3.org/2000/svg"
+											viewBox="0 0 16 16"
+											fill="currentColor"
+											class="w-4 h-4"
+										>
+											<path
+												d="M7.25 10.25a.75.75 0 0 0 1.5 0V4.56l2.22 2.22a.75.75 0 1 0 1.06-1.06l-3.5-3.5a.75.75 0 0 0-1.06 0l-3.5 3.5a.75.75 0 0 0 1.06 1.06l2.22-2.22v5.69Z"
+											/>
+											<path
+												d="M3.5 9.75a.75.75 0 0 0-1.5 0v1.5A2.75 2.75 0 0 0 4.75 14h6.5A2.75 2.75 0 0 0 14 11.25v-1.5a.75.75 0 0 0-1.5 0v1.5c0 .69-.56 1.25-1.25 1.25h-6.5c-.69 0-1.25-.56-1.25-1.25v-1.5Z"
+											/>
+										</svg>
+									{/if}
+								</button>
+							{/if}
+						</div>
+
+						{#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')}
+							<div>
+								<div>
+									<div class=" my-2.5 text-sm font-medium">Modelfile Content</div>
+									<textarea
+										bind:value={modelFileContent}
+										class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none resize-none"
+										rows="6"
+									/>
+								</div>
 							</div>
-						{:else}
-							<svg
-								xmlns="http://www.w3.org/2000/svg"
-								viewBox="0 0 16 16"
-								fill="currentColor"
-								class="w-4 h-4"
+						{/if}
+						<div class=" mt-1 text-xs text-gray-400 dark:text-gray-500">
+							To access the GGUF models available for downloading, <a
+								class=" text-gray-500 dark:text-gray-300 font-medium"
+								href="https://huggingface.co/models?search=gguf"
+								target="_blank">click here.</a
 							>
-								<path
-									d="M7.25 10.25a.75.75 0 0 0 1.5 0V4.56l2.22 2.22a.75.75 0 1 0 1.06-1.06l-3.5-3.5a.75.75 0 0 0-1.06 0l-3.5 3.5a.75.75 0 0 0 1.06 1.06l2.22-2.22v5.69Z"
-								/>
-								<path
-									d="M3.5 9.75a.75.75 0 0 0-1.5 0v1.5A2.75 2.75 0 0 0 4.75 14h6.5A2.75 2.75 0 0 0 14 11.25v-1.5a.75.75 0 0 0-1.5 0v1.5c0 .69-.56 1.25-1.25 1.25h-6.5c-.69 0-1.25-.56-1.25-1.25v-1.5Z"
-								/>
-							</svg>
+						</div>
+
+						{#if uploadProgress !== null}
+							<div class="mt-2">
+								<div class=" mb-2 text-xs">Upload Progress</div>
+
+								<div class="w-full rounded-full dark:bg-gray-800">
+									<div
+										class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
+										style="width: {Math.max(15, uploadProgress ?? 0)}%"
+									>
+										{uploadProgress ?? 0}%
+									</div>
+								</div>
+								<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
+									{modelFileDigest}
+								</div>
+							</div>
 						{/if}
-					</button>
+					</form>
 				{/if}
 			</div>
+			<hr class=" dark:border-gray-700 my-2" />
+		{/if}
 
-			{#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')}
+		<div class=" space-y-3">
+			<div class="mt-2 space-y-3 pr-1.5">
 				<div>
+					<div class=" mb-2 text-sm font-medium">Manage LiteLLM Models</div>
+
+					<div>
+						<div class="flex justify-between items-center text-xs">
+							<div class=" text-sm font-medium">Add a model</div>
+							<button
+								class=" text-xs font-medium text-gray-500"
+								type="button"
+								on:click={() => {
+									showLiteLLMParams = !showLiteLLMParams;
+								}}>{showLiteLLMParams ? 'Advanced' : 'Default'}</button
+							>
+						</div>
+					</div>
+
+					<div class="my-2 space-y-2">
+						<div class="flex w-full mb-1.5">
+							<div class="flex-1 mr-2">
+								<input
+									class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+									placeholder="Enter LiteLLM Model (litellm_params.model)"
+									bind:value={liteLLMModel}
+									autocomplete="off"
+								/>
+							</div>
+
+							<button
+								class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
+								on:click={() => {
+									addLiteLLMModelHandler();
+								}}
+							>
+								<svg
+									xmlns="http://www.w3.org/2000/svg"
+									viewBox="0 0 16 16"
+									fill="currentColor"
+									class="w-4 h-4"
+								>
+									<path
+										d="M8.75 3.75a.75.75 0 0 0-1.5 0v3.5h-3.5a.75.75 0 0 0 0 1.5h3.5v3.5a.75.75 0 0 0 1.5 0v-3.5h3.5a.75.75 0 0 0 0-1.5h-3.5v-3.5Z"
+									/>
+								</svg>
+							</button>
+						</div>
+
+						{#if showLiteLLMParams}
+							<div>
+								<div class=" mb-1.5 text-sm font-medium">Model Name</div>
+								<div class="flex w-full">
+									<div class="flex-1">
+										<input
+											class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+											placeholder="Enter Model Name (model_name)"
+											bind:value={liteLLMModelName}
+											autocomplete="off"
+										/>
+									</div>
+								</div>
+							</div>
+
+							<div>
+								<div class=" mb-1.5 text-sm font-medium">API Base URL</div>
+								<div class="flex w-full">
+									<div class="flex-1">
+										<input
+											class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+											placeholder="Enter LiteLLM API Base URL (litellm_params.api_base)"
+											bind:value={liteLLMAPIBase}
+											autocomplete="off"
+										/>
+									</div>
+								</div>
+							</div>
+
+							<div>
+								<div class=" mb-1.5 text-sm font-medium">API Key</div>
+								<div class="flex w-full">
+									<div class="flex-1">
+										<input
+											class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+											placeholder="Enter LiteLLM API Key (litellm_params.api_key)"
+											bind:value={liteLLMAPIKey}
+											autocomplete="off"
+										/>
+									</div>
+								</div>
+							</div>
+
+							<div>
+								<div class="mb-1.5 text-sm font-medium">API RPM</div>
+								<div class="flex w-full">
+									<div class="flex-1">
+										<input
+											class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+											placeholder="Enter LiteLLM API RPM (litellm_params.rpm)"
+											bind:value={liteLLMRPM}
+											autocomplete="off"
+										/>
+									</div>
+								</div>
+							</div>
+						{/if}
+					</div>
+
+					<div class="mb-2 text-xs text-gray-400 dark:text-gray-500">
+						Not sure what to add?
+						<a
+							class=" text-gray-300 font-medium"
+							href="https://litellm.vercel.app/docs/proxy/configs#quick-start"
+							target="_blank"
+						>
+							Click here for help.
+						</a>
+					</div>
+
 					<div>
-						<div class=" my-2.5 text-sm font-medium">Modelfile Content</div>
-						<textarea
-							bind:value={modelFileContent}
-							class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none resize-none"
-							rows="6"
-						/>
+						<div class=" mb-2.5 text-sm font-medium">Delete a model</div>
+						<div class="flex w-full">
+							<div class="flex-1 mr-2">
+								<select
+									class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+									bind:value={deleteLiteLLMModelId}
+									placeholder="Select a model"
+								>
+									{#if !deleteLiteLLMModelId}
+										<option value="" disabled selected>Select a model</option>
+									{/if}
+									{#each liteLLMModelInfo as model}
+										<option value={model.model_info.id} class="bg-gray-100 dark:bg-gray-700"
+											>{model.model_name}</option
+										>
+									{/each}
+								</select>
+							</div>
+							<button
+								class="px-3 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded transition"
+								on:click={() => {
+									deleteLiteLLMModelHandler();
+								}}
+							>
+								<svg
+									xmlns="http://www.w3.org/2000/svg"
+									viewBox="0 0 16 16"
+									fill="currentColor"
+									class="w-4 h-4"
+								>
+									<path
+										fill-rule="evenodd"
+										d="M5 3.25V4H2.75a.75.75 0 0 0 0 1.5h.3l.815 8.15A1.5 1.5 0 0 0 5.357 15h5.285a1.5 1.5 0 0 0 1.493-1.35l.815-8.15h.3a.75.75 0 0 0 0-1.5H11v-.75A2.25 2.25 0 0 0 8.75 1h-1.5A2.25 2.25 0 0 0 5 3.25Zm2.25-.75a.75.75 0 0 0-.75.75V4h3v-.75a.75.75 0 0 0-.75-.75h-1.5ZM6.05 6a.75.75 0 0 1 .787.713l.275 5.5a.75.75 0 0 1-1.498.075l-.275-5.5A.75.75 0 0 1 6.05 6Zm3.9 0a.75.75 0 0 1 .712.787l-.275 5.5a.75.75 0 0 1-1.498-.075l.275-5.5a.75.75 0 0 1 .786-.711Z"
+										clip-rule="evenodd"
+									/>
+								</svg>
+							</button>
+						</div>
 					</div>
 				</div>
-			{/if}
-			<div class=" mt-1 text-xs text-gray-400 dark:text-gray-500">
-				To access the GGUF models available for downloading, <a
-					class=" text-gray-500 dark:text-gray-300 font-medium"
-					href="https://huggingface.co/models?search=gguf"
-					target="_blank">click here.</a
-				>
 			</div>
 
-			{#if uploadProgress !== null}
-				<div class="mt-2">
-					<div class=" mb-2 text-xs">Upload Progress</div>
+			<!-- <div class="mt-2 space-y-3 pr-1.5">
+				<div>
+					<div class=" mb-2.5 text-sm font-medium">Add LiteLLM Model</div>
+					<div class="flex w-full mb-2">
+						<div class="flex-1">
+							<input
+								class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+								placeholder="Enter LiteLLM Model (e.g. ollama/mistral)"
+								bind:value={liteLLMModel}
+								autocomplete="off"
+							/>
+						</div>
+					</div>
 
-					<div class="w-full rounded-full dark:bg-gray-800">
-						<div
-							class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
-							style="width: {Math.max(15, uploadProgress ?? 0)}%"
+					<div class="flex justify-between items-center text-sm">
+						<div class="  font-medium">Advanced Model Params</div>
+						<button
+							class=" text-xs font-medium text-gray-500"
+							type="button"
+							on:click={() => {
+								showLiteLLMParams = !showLiteLLMParams;
+							}}>{showLiteLLMParams ? 'Hide' : 'Show'}</button
 						>
-							{uploadProgress ?? 0}%
-						</div>
 					</div>
-					<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
-						{modelFileDigest}
+
+					{#if showLiteLLMParams}
+						<div>
+							<div class=" mb-2.5 text-sm font-medium">LiteLLM API Key</div>
+							<div class="flex w-full">
+								<div class="flex-1">
+									<input
+										class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+										placeholder="Enter LiteLLM API Key (e.g. os.environ/AZURE_API_KEY_CA)"
+										bind:value={liteLLMAPIKey}
+										autocomplete="off"
+									/>
+								</div>
+							</div>
+						</div>
+
+						<div>
+							<div class=" mb-2.5 text-sm font-medium">LiteLLM API Base URL</div>
+							<div class="flex w-full">
+								<div class="flex-1">
+									<input
+										class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+										placeholder="Enter LiteLLM API Base URL"
+										bind:value={liteLLMAPIBase}
+										autocomplete="off"
+									/>
+								</div>
+							</div>
+						</div>
+
+						<div>
+							<div class=" mb-2.5 text-sm font-medium">LiteLLM API RPM</div>
+							<div class="flex w-full">
+								<div class="flex-1">
+									<input
+										class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
+										placeholder="Enter LiteLLM API RPM"
+										bind:value={liteLLMRPM}
+										autocomplete="off"
+									/>
+								</div>
+							</div>
+						</div>
+					{/if}
+
+					<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
+						Not sure what to add?
+						<a
+							class=" text-gray-300 font-medium"
+							href="https://litellm.vercel.app/docs/proxy/configs#quick-start"
+							target="_blank"
+						>
+							Click here for help.
+						</a>
 					</div>
 				</div>
-			{/if}
-		</form>
+			</div> -->
+		</div>
 	</div>
 </div>

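Note: the new `addLiteLLMModelHandler` / `deleteLiteLLMModelHandler` callbacks above are wired up in this component, but their implementations live in `src/lib/apis/litellm`, which this diff does not include. A speculative sketch of what the add handler's API call might look like, assuming LiteLLM's usual `/model/new` management route and the `LITELLM_API_BASE_URL` constant introduced below:

```ts
// Speculative sketch only: the real helper is in src/lib/apis/litellm (not shown in
// this diff), and the /model/new route is an assumption based on LiteLLM proxy docs.
import { LITELLM_API_BASE_URL } from '$lib/constants';

export const addLiteLLMModel = async (token: string, payload: object) => {
	const res = await fetch(`${LITELLM_API_BASE_URL}/model/new`, {
		method: 'POST',
		headers: {
			'Content-Type': 'application/json',
			// the same WebUI session token the other API helpers send
			Authorization: `Bearer ${token}`
		},
		body: JSON.stringify(payload)
	});
	if (!res.ok) throw await res.json();
	return res.json();
};
```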
+ 21 - 14
src/lib/components/chat/SettingsModal.svelte

@@ -4,6 +4,7 @@
 
 	import { getOllamaModels } from '$lib/apis/ollama';
 	import { getOpenAIModels } from '$lib/apis/openai';
+	import { getLiteLLMModels } from '$lib/apis/litellm';
 
 	import Modal from '../common/Modal.svelte';
 	import Account from './Settings/Account.svelte';
@@ -27,23 +28,29 @@
 
 	let selectedTab = 'general';
 
-	const getModels = async (type = 'all') => {
-		const models = [];
-		models.push(
-			...(await getOllamaModels(localStorage.token).catch((error) => {
-				toast.error(error);
-				return [];
-			}))
-		);
-
-		if (type === 'all') {
-			const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
+	const getModels = async () => {
+		let models = await Promise.all([
+			await getOllamaModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getOpenAIModels(localStorage.token).catch((error) => {
 				console.log(error);
 				return null;
-			});
-			models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
-		}
+			}),
+			await getLiteLLMModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			})
+		]);
+
+		models = models
+			.filter((models) => models)
+			.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
 
+		// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
+		// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
 		return models;
 	};
 </script>

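The rewritten `getModels` gathers the Ollama, OpenAI, and LiteLLM model lists, drops any source that errored (each `catch` resolves to `null`), and joins the survivors with `{ name: 'hr' }` divider entries that the model selector renders as separators. Note that the `await`s inside the array literal make the three fetches effectively sequential despite `Promise.all`. A minimal standalone sketch of the merge step, with an illustrative `Model` type:

```ts
// Minimal sketch of the filter-and-reduce merge in getModels; `Model` is illustrative.
type Model = { name: string };

const mergeModelLists = (sources: (Model[] | null)[]): Model[] =>
	sources
		.filter((s): s is Model[] => Boolean(s)) // failed fetches resolved to null; drop them
		.reduce<Model[]>(
			(acc, group, i, arr) =>
				// concat flattens each group, then appends an 'hr' divider between groups;
				// filtering first means a failed source never yields a doubled divider
				acc.concat(group, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])),
			[]
		);

// mergeModelLists([[{ name: 'mistral:latest' }], null, [{ name: 'gpt-4' }]])
// -> [{ name: 'mistral:latest' }, { name: 'hr' }, { name: 'gpt-4' }]
```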
+ 2 - 0
src/lib/constants.ts

@@ -5,6 +5,8 @@ export const APP_NAME = 'Open WebUI';
 export const WEBUI_BASE_URL = dev ? `http://${location.hostname}:8080` : ``;
 
 export const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;
+
+export const LITELLM_API_BASE_URL = `${WEBUI_BASE_URL}/litellm/api`;
 export const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama/api`;
 export const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai/api`;
 export const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/audio/api/v1`;

+ 23 - 74
src/routes/(app)/+layout.svelte

@@ -11,6 +11,7 @@
 	import { getModelfiles } from '$lib/apis/modelfiles';
 	import { getPrompts } from '$lib/apis/prompts';
 	import { getOpenAIModels } from '$lib/apis/openai';
+	import { getLiteLLMModels } from '$lib/apis/litellm';
 	import { getDocs } from '$lib/apis/documents';
 	import { getAllChatTags } from '$lib/apis/chats';
 
@@ -43,24 +44,28 @@
 	let showShortcuts = false;
 
 	const getModels = async () => {
-		let models = [];
-		models.push(
-			...(await getOllamaModels(localStorage.token).catch((error) => {
-				toast.error(error);
-				return [];
-			}))
-		);
-
-		// $settings.OPENAI_API_BASE_URL ?? 'https://api.openai.com/v1',
-		// 		$settings.OPENAI_API_KEY
-
-		const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
-			console.log(error);
-			return null;
-		});
-
-		models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
-
+		let models = await Promise.all([
+			await getOllamaModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getOpenAIModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getLiteLLMModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			})
+		]);
+
+		models = models
+			.filter((models) => models)
+			.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
+
+		// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
+		// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
 		return models;
 	};
 
@@ -117,8 +122,6 @@
 				await models.set(await getModels());
 			});
 
-			await setOllamaVersion();
-
 			document.addEventListener('keydown', function (event) {
 				const isCtrlPressed = event.ctrlKey || event.metaKey; // metaKey is for Cmd key on Mac
 				// Check if the Shift key is pressed
@@ -250,60 +253,6 @@
 					</div>
 				</div>
 			</div>
-		{:else if checkVersion(REQUIRED_OLLAMA_VERSION, ollamaVersion ?? '0')}
-			<div class="fixed w-full h-full flex z-50">
-				<div
-					class="absolute w-full h-full backdrop-blur-md bg-white/20 dark:bg-gray-900/50 flex justify-center"
-				>
-					<div class="m-auto pb-44 flex flex-col justify-center">
-						<div class="max-w-md">
-							<div class="text-center dark:text-white text-2xl font-medium z-50">
-								Connection Issue or Update Needed
-							</div>
-
-							<div class=" mt-4 text-center text-sm dark:text-gray-200 w-full">
-								Oops! It seems like your Ollama needs a little attention. <br
-									class=" hidden sm:flex"
-								/>We've detected either a connection hiccup or observed that you're using an older
-								version. Ensure you're on the latest Ollama version
-								<br class=" hidden sm:flex" />(version
-								<span class=" dark:text-white font-medium">{REQUIRED_OLLAMA_VERSION} or higher</span
-								>) or check your connection.
-
-								<div class="mt-1 text-sm">
-									Trouble accessing Ollama?
-									<a
-										class=" text-black dark:text-white font-semibold underline"
-										href="https://github.com/open-webui/open-webui#troubleshooting"
-										target="_blank"
-									>
-										Click here for help.
-									</a>
-								</div>
-							</div>
-
-							<div class=" mt-6 mx-auto relative group w-fit">
-								<button
-									class="relative z-20 flex px-5 py-2 rounded-full bg-white border border-gray-100 dark:border-none hover:bg-gray-100 transition font-medium text-sm"
-									on:click={async () => {
-										location.href = '/';
-										// await setOllamaVersion();
-									}}
-								>
-									Check Again
-								</button>
-
-								<button
-									class="text-xs text-center w-full mt-2 text-gray-400 underline"
-									on:click={async () => {
-										await setOllamaVersion(REQUIRED_OLLAMA_VERSION);
-									}}>Close</button
-								>
-							</div>
-						</div>
-					</div>
-				</div>
-			</div>
 		{:else if localDBChats.length > 0}
 			<div class="fixed w-full h-full flex z-50">
 			<div class="fixed w-full h-full flex z-50">
 				<div

+ 93 - 78
src/routes/(app)/+page.svelte

@@ -36,6 +36,7 @@
 	import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
 	import Navbar from '$lib/components/layout/Navbar.svelte';
 	import { RAGTemplate } from '$lib/utils/rag';
+	import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
 	import { WEBUI_BASE_URL } from '$lib/constants';
 
 	let stopResponseFlag = false;
@@ -132,6 +133,10 @@
 			selectedModels = [''];
 		}
 
+		selectedModels = selectedModels.map((modelId) =>
+			$models.map((m) => m.id).includes(modelId) ? modelId : ''
+		);
+
 		let _settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
 		settings.set({
 			..._settings
@@ -150,6 +155,10 @@
 	const submitPrompt = async (userPrompt, _user = null) => {
 		console.log('submitPrompt', $chatId);
 
+		selectedModels = selectedModels.map((modelId) =>
+			$models.map((m) => m.id).includes(modelId) ? modelId : ''
+		);
+
 		if (selectedModels.includes('')) {
 			toast.error('Model not selected');
 		} else if (messages.length != 0 && messages.at(-1).done != true) {
@@ -278,40 +287,41 @@
 		}
 
 		await Promise.all(
-			selectedModels.map(async (model) => {
-				console.log(model);
-				const modelTag = $models.filter((m) => m.name === model).at(0);
-
-				// Create response message
-				let responseMessageId = uuidv4();
-				let responseMessage = {
-					parentId: parentId,
-					id: responseMessageId,
-					childrenIds: [],
-					role: 'assistant',
-					content: '',
-					model: model,
-					timestamp: Math.floor(Date.now() / 1000) // Unix epoch
-				};
-
-				// Add message to history and Set currentId to messageId
-				history.messages[responseMessageId] = responseMessage;
-				history.currentId = responseMessageId;
-
-				// Append messageId to childrenIds of parent message
-				if (parentId !== null) {
-					history.messages[parentId].childrenIds = [
-						...history.messages[parentId].childrenIds,
-						responseMessageId
-					];
-				}
+			selectedModels.map(async (modelId) => {
+				const model = $models.filter((m) => m.id === modelId).at(0);
+
+				if (model) {
+					// Create response message
+					let responseMessageId = uuidv4();
+					let responseMessage = {
+						parentId: parentId,
+						id: responseMessageId,
+						childrenIds: [],
+						role: 'assistant',
+						content: '',
+						model: model.id,
+						timestamp: Math.floor(Date.now() / 1000) // Unix epoch
+					};
+
+					// Add message to history and Set currentId to messageId
+					history.messages[responseMessageId] = responseMessage;
+					history.currentId = responseMessageId;
+
+					// Append messageId to childrenIds of parent message
+					if (parentId !== null) {
+						history.messages[parentId].childrenIds = [
+							...history.messages[parentId].childrenIds,
+							responseMessageId
+						];
+					}
 
-				if (modelTag?.external) {
-					await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
-				} else if (modelTag) {
-					await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+					if (model?.external) {
+						await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
+					} else if (model) {
+						await sendPromptOllama(model, prompt, responseMessageId, _chatId);
+					}
 				} else {
-					toast.error(`Model ${model} not found`);
+					toast.error(`Model ${modelId} not found`);
 				}
 			})
 		);
@@ -320,6 +330,7 @@
 	};
 
 	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
+		model = model.id;
 		const responseMessage = history.messages[responseMessageId];
 
 		// Wait until history/message have been updated
@@ -531,54 +542,58 @@
 		const responseMessage = history.messages[responseMessageId];
 		scrollToBottom();
 
-		const res = await generateOpenAIChatCompletion(localStorage.token, {
-			model: model,
-			stream: true,
-			messages: [
-				$settings.system
-					? {
-							role: 'system',
-							content: $settings.system
-					  }
-					: undefined,
-				...messages.filter((message) => !message.deleted)
-			]
-				.filter((message) => message)
-				.map((message, idx, arr) => ({
-					role: message.role,
-					...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+		const res = await generateOpenAIChatCompletion(
+			localStorage.token,
+			{
+				model: model.id,
+				stream: true,
+				messages: [
+					$settings.system
 						? {
-								content: [
-									{
-										type: 'text',
-										text:
-											arr.length - 1 !== idx
-												? message.content
-												: message?.raContent ?? message.content
-									},
-									...message.files
-										.filter((file) => file.type === 'image')
-										.map((file) => ({
-											type: 'image_url',
-											image_url: {
-												url: file.url
-											}
-										}))
-								]
+								role: 'system',
+								content: $settings.system
 						  }
-						: {
-								content:
-									arr.length - 1 !== idx ? message.content : message?.raContent ?? message.content
-						  })
-				})),
-			seed: $settings?.options?.seed ?? undefined,
-			stop: $settings?.options?.stop ?? undefined,
-			temperature: $settings?.options?.temperature ?? undefined,
-			top_p: $settings?.options?.top_p ?? undefined,
-			num_ctx: $settings?.options?.num_ctx ?? undefined,
-			frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
-			max_tokens: $settings?.options?.num_predict ?? undefined
-		});
+						: undefined,
+					...messages.filter((message) => !message.deleted)
+				]
+					.filter((message) => message)
+					.map((message, idx, arr) => ({
+						role: message.role,
+						...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+							? {
+									content: [
+										{
+											type: 'text',
+											text:
+												arr.length - 1 !== idx
+													? message.content
+													: message?.raContent ?? message.content
+										},
+										...message.files
+											.filter((file) => file.type === 'image')
+											.map((file) => ({
+												type: 'image_url',
+												image_url: {
+													url: file.url
+												}
+											}))
+									]
+							  }
+							: {
+									content:
+										arr.length - 1 !== idx ? message.content : message?.raContent ?? message.content
+							  })
+					})),
+				seed: $settings?.options?.seed ?? undefined,
+				stop: $settings?.options?.stop ?? undefined,
+				temperature: $settings?.options?.temperature ?? undefined,
+				top_p: $settings?.options?.top_p ?? undefined,
+				num_ctx: $settings?.options?.num_ctx ?? undefined,
+				frequency_penalty: $settings?.options?.repeat_penalty ?? undefined,
+				max_tokens: $settings?.options?.num_predict ?? undefined
+			},
+			model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
+		);
 
 		if (res && res.ok) {
 			const reader = res.body

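Two details in the `+page.svelte` hunks are easy to miss. First, `sendPromptOllama` now receives the full model object but immediately reassigns `model = model.id`, so the rest of that function keeps operating on the model name string as before. Second, `generateOpenAIChatCompletion` gains a base-URL argument that routes the request to the bundled LiteLLM proxy for LiteLLM-sourced models and to the OpenAI endpoint otherwise. Schematically, with the `Model` shape inferred from this diff rather than taken from a published type:

```ts
import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';

// Shape inferred from this diff: `source` marks models listed by the LiteLLM proxy,
// `external` marks OpenAI-compatible ones.
type Model = { id: string; source?: string; external?: boolean };

// Mirrors the ternary passed as the new argument to generateOpenAIChatCompletion above.
const completionsBaseUrl = (model: Model): string =>
	model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : OPENAI_API_BASE_URL;
```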
+ 6 - 0
test.json

@@ -0,0 +1,6 @@
+{
+    "model_name": "string",
+    "litellm_params": {
+        "model": "ollama/mistral"
+    }
+}
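`test.json` is a minimal example of the add-model payload: a display name plus the `litellm_params` the proxy needs. Filling in the optional Advanced fields from the settings form would presumably produce something like the following; every value is a placeholder echoing the inputs' own hint text, not a working configuration:

```ts
// Hypothetical payload assembled from the form fields shown earlier in this diff;
// all values are placeholders taken from the inputs' placeholder text.
const addModelPayload = {
	model_name: 'mistral', // model_name
	litellm_params: {
		model: 'ollama/mistral', // litellm_params.model
		api_base: 'http://host.docker.internal:11434', // litellm_params.api_base
		api_key: 'os.environ/AZURE_API_KEY_CA', // litellm_params.api_key
		rpm: 60 // litellm_params.rpm
	}
};
```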