
Merge Updates & Dockerfile improvements

lainedfles 1 year ago
parent
commit
9763d885be
100 files changed with 6010 additions and 3773 deletions
  1. 23 11
      .github/workflows/format-backend.yaml
  2. 28 14
      .github/workflows/format-build-frontend.yaml
  3. 1 1
      .gitignore
  4. 80 0
      CHANGELOG.md
  5. 12 5
      Dockerfile
  6. 2 0
      Makefile
  7. 3 1
      README.md
  8. 19 5
      backend/apps/audio/main.py
  9. 116 19
      backend/apps/images/main.py
  10. 234 0
      backend/apps/images/utils/comfyui.py
  11. 67 8
      backend/apps/litellm/main.py
  12. 318 52
      backend/apps/ollama/main.py
  13. 46 21
      backend/apps/openai/main.py
  14. 152 85
      backend/apps/rag/main.py
  15. 13 6
      backend/apps/rag/utils.py
  16. 5 2
      backend/apps/web/internal/db.py
  17. 2 0
      backend/apps/web/main.py
  18. 8 2
      backend/apps/web/models/auths.py
  19. 0 14
      backend/apps/web/models/chats.py
  20. 8 2
      backend/apps/web/models/documents.py
  21. 12 12
      backend/apps/web/models/modelfiles.py
  22. 9 6
      backend/apps/web/models/prompts.py
  23. 10 4
      backend/apps/web/models/tags.py
  24. 13 1
      backend/apps/web/routers/auths.py
  25. 8 2
      backend/apps/web/routers/chats.py
  26. 6 2
      backend/apps/web/routers/configs.py
  27. 24 21
      backend/apps/web/routers/modelfiles.py
  28. 7 1
      backend/apps/web/routers/users.py
  29. 0 149
      backend/apps/web/routers/utils.py
  30. 66 10
      backend/config.py
  31. 11 1
      backend/constants.py
  32. 23 34
      backend/data/config.json
  33. 48 7
      backend/main.py
  34. 1 0
      backend/requirements.txt
  35. 5 0
      backend/start.sh
  36. 54 0
      backend/utils/webhook.py
  37. BIN
      demo.gif
  38. 12 0
      docs/CONTRIBUTING.md
  39. 38 0
      i18next-parser.config.ts
  40. 1 1
      kubernetes/helm/templates/ollama-statefulset.yaml
  41. 1 1
      kubernetes/helm/templates/webui-pvc.yaml
  42. 11 6
      kubernetes/helm/templates/webui-service.yaml
  43. 2 0
      kubernetes/helm/values.yaml
  44. 1 1
      kubernetes/manifest/base/webui-deployment.yaml
  45. 2 2
      kubernetes/manifest/base/webui-pvc.yaml
  46. 2529 2511
      package-lock.json
  47. 9 3
      package.json
  48. 4 0
      src/app.css
  49. 32 11
      src/app.html
  50. 5 5
      src/lib/apis/images/index.ts
  51. 57 0
      src/lib/apis/index.ts
  52. 1 1
      src/lib/apis/litellm/index.ts
  53. 68 1
      src/lib/apis/ollama/index.ts
  54. 50 0
      src/lib/apis/openai/index.ts
  55. 7 2
      src/lib/components/AddFilesPlaceholder.svelte
  56. 7 4
      src/lib/components/ChangelogModal.svelte
  57. 9 7
      src/lib/components/admin/EditUserModal.svelte
  58. 6 4
      src/lib/components/admin/Settings/Database.svelte
  59. 43 15
      src/lib/components/admin/Settings/General.svelte
  60. 15 11
      src/lib/components/admin/Settings/Users.svelte
  61. 7 4
      src/lib/components/admin/SettingsModal.svelte
  62. 23 15
      src/lib/components/chat/MessageInput.svelte
  63. 8 4
      src/lib/components/chat/MessageInput/Documents.svelte
  64. 7 3
      src/lib/components/chat/MessageInput/Models.svelte
  65. 7 4
      src/lib/components/chat/MessageInput/PromptCommands.svelte
  66. 4 2
      src/lib/components/chat/Messages.svelte
  67. 7 5
      src/lib/components/chat/Messages/Placeholder.svelte
  68. 7 5
      src/lib/components/chat/Messages/ResponseMessage.svelte
  69. 12 9
      src/lib/components/chat/Messages/UserMessage.svelte
  70. 72 98
      src/lib/components/chat/ModelSelector.svelte
  71. 389 0
      src/lib/components/chat/ModelSelector/Selector.svelte
  72. 13 9
      src/lib/components/chat/Settings/About.svelte
  73. 9 7
      src/lib/components/chat/Settings/Account.svelte
  74. 10 7
      src/lib/components/chat/Settings/Account/UpdatePassword.svelte
  75. 13 11
      src/lib/components/chat/Settings/Advanced.svelte
  76. 40 36
      src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte
  77. 33 25
      src/lib/components/chat/Settings/Audio.svelte
  78. 14 43
      src/lib/components/chat/Settings/Chats.svelte
  79. 16 13
      src/lib/components/chat/Settings/Connections.svelte
  80. 106 72
      src/lib/components/chat/Settings/General.svelte
  81. 115 46
      src/lib/components/chat/Settings/Images.svelte
  82. 75 34
      src/lib/components/chat/Settings/Interface.svelte
  83. 216 97
      src/lib/components/chat/Settings/Models.svelte
  84. 20 17
      src/lib/components/chat/SettingsModal.svelte
  85. 6 3
      src/lib/components/chat/ShareChatModal.svelte
  86. 12 9
      src/lib/components/chat/ShortcutsModal.svelte
  87. 20 0
      src/lib/components/chat/TagChatModal.svelte
  88. 40 0
      src/lib/components/common/Dropdown.svelte
  89. 17 4
      src/lib/components/common/ImagePreview.svelte
  90. 4 2
      src/lib/components/common/Modal.svelte
  91. 95 0
      src/lib/components/common/Selector.svelte
  92. 2 1
      src/lib/components/common/Tags.svelte
  93. 38 17
      src/lib/components/common/Tags/TagInput.svelte
  94. 1 1
      src/lib/components/common/Tags/TagList.svelte
  95. 1 1
      src/lib/components/common/Tooltip.svelte
  96. 9 7
      src/lib/components/documents/AddDocModal.svelte
  97. 8 6
      src/lib/components/documents/EditDocModal.svelte
  98. 170 70
      src/lib/components/documents/Settings/General.svelte
  99. 5 2
      src/lib/components/documents/SettingsModal.svelte
  100. 15 0
      src/lib/components/icons/Check.svelte

+ 23 - 11
.github/workflows/format-backend.yaml

@@ -1,27 +1,39 @@
 name: Python CI
+
 on:
   push:
-    branches: ['main']
+    branches:
+      - main
+      - dev
   pull_request:
+    branches:
+      - main
+      - dev
+
 jobs:
   build:
     name: 'Format Backend'
-    env:
-      PUBLIC_API_BASE_URL: ''
     runs-on: ubuntu-latest
+
     strategy:
       matrix:
-        node-version:
-          - latest
+        python-version: [3.11]
+
     steps:
       - uses: actions/checkout@v4
-      - name: Use Python
-        uses: actions/setup-python@v4
-      - name: Use Bun
-        uses: oven-sh/setup-bun@v1
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install yapf
+          pip install black
+
       - name: Format backend
-        run: bun run format:backend
+        run: npm run format:backend
+
+      - name: Check for changes after format
+        run: git diff --exit-code

+ 28 - 14
.github/workflows/format-build-frontend.yaml

@@ -1,22 +1,36 @@
-name: Bun CI
+name: Frontend Build
+
 on:
   push:
-    branches: ['main']
+    branches:
+      - main
+      - dev
   pull_request:
+    branches:
+      - main
+      - dev
+
 jobs:
   build:
     name: 'Format & Build Frontend'
-    env:
-      PUBLIC_API_BASE_URL: ''
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - name: Use Bun
-        uses: oven-sh/setup-bun@v1
-      - run: bun --version
-      - name: Install frontend dependencies
-        run: bun install
-      - name: Format frontend
-        run: bun run format
-      - name: Build frontend
-        run: bun run build
+      - name: Checkout Repository
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: '20' # Or specify any other version you want to use
+
+      - name: Install Dependencies
+        run: npm install
+
+      - name: Format Frontend
+        run: npm run format
+
+      - name: Check for Changes After Format
+        run: git diff --exit-code
+
+      - name: Build Frontend
+        run: npm run build

+ 1 - 1
.gitignore

@@ -166,7 +166,7 @@ cython_debug/
 #  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 #  and can be added to the global gitignore or merged into this file.  For a more nuclear
 #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
+.idea/
 
 # Logs
 logs

+ 80 - 0
CHANGELOG.md

@@ -5,6 +5,86 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.116] - 2024-03-31
+
+### Added
+
+- **🔄 Enhanced UI**: Model selector now conveniently located in the navbar, enabling seamless switching between multiple models during conversations.
+- **🔍 Improved Model Selector**: Directly pull a model from the selector/Models now display detailed information for better understanding.
+- **💬 Webhook Support**: Now compatible with Google Chat and Microsoft Teams.
+- **🌐 Localization**: Korean translation (I18n) now available.
+- **🌑 Dark Theme**: OLED dark theme introduced for reduced strain during prolonged usage.
+- **🏷️ Tag Autocomplete**: Dropdown feature added for effortless chat tagging.
+
+### Fixed
+
+- **🔽 Auto-Scrolling**: Addressed OpenAI auto-scrolling issue.
+- **🏷️ Tag Validation**: Implemented tag validation to prevent empty string tags.
+- **🚫 Model Whitelisting**: Resolved LiteLLM model whitelisting issue.
+- **✅ Spelling**: Corrected various spelling issues for improved readability.
+
+## [0.1.115] - 2024-03-24
+
+### Added
+
+- **🔍 Custom Model Selector**: Easily find and select custom models with the new search filter feature.
+- **🛑 Cancel Model Download**: Added the ability to cancel model downloads.
+- **🎨 Image Generation ComfyUI**: Image generation now supports ComfyUI.
+- **🌟 Updated Light Theme**: Updated the light theme for a fresh look.
+- **🌍 Additional Language Support**: Now supporting Bulgarian, Italian, Portuguese, Japanese, and Dutch.
+
+### Fixed
+
+- **🔧 Fixed Broken Experimental GGUF Upload**: Resolved issues with experimental GGUF upload functionality.
+
+### Changed
+
+- **🔄 Vector Storage Reset Button**: Moved the reset vector storage button to document settings.
+
+## [0.1.114] - 2024-03-20
+
+### Added
+
+- **🔗 Webhook Integration**: Now you can subscribe to new user sign-up events via webhook. Simply navigate to the admin panel > admin settings > webhook URL.
+- **🛡️ Enhanced Model Filtering**: Alongside Ollama, OpenAI proxy model whitelisting, we've added model filtering functionality for LiteLLM proxy.
+- **🌍 Expanded Language Support**: Spanish, Catalan, and Vietnamese languages are now available, with improvements made to others.
+
+### Fixed
+
+- **🔧 Input Field Spelling**: Resolved issue with spelling mistakes in input fields.
+- **🖊️ Light Mode Styling**: Fixed styling issue with light mode in document adding.
+
+### Changed
+
+- **🔄 Language Sorting**: Languages are now sorted alphabetically by their code for improved organization.
+
+## [0.1.113] - 2024-03-18
+
+### Added
+
+- 🌍 **Localization**: You can now change the UI language in Settings > General. We support Ukrainian, German, Farsi (Persian), Traditional and Simplified Chinese and French translations. You can help us to translate the UI into your language! More info in our [CONTRIBUTION.md](https://github.com/open-webui/open-webui/blob/main/docs/CONTRIBUTING.md#-translations-and-internationalization).
+- 🎨 **System-wide Theme**: Introducing a new system-wide theme for enhanced visual experience.
+
+### Fixed
+
+- 🌑 **Dark Background on Select Fields**: Improved readability by adding a dark background to select fields, addressing issues on certain browsers/devices.
+- **Multiple OPENAI_API_BASE_URLS Issue**: Resolved issue where multiple base URLs caused conflicts when one wasn't functioning.
+- **RAG Encoding Issue**: Fixed encoding problem in RAG.
+- **npm Audit Fix**: Addressed npm audit findings.
+- **Reduced Scroll Threshold**: Improved auto-scroll experience by reducing the scroll threshold from 50px to 5px.
+
+### Changed
+
+- 🔄 **Sidebar UI Update**: Updated sidebar UI to feature a chat menu dropdown, replacing two icons for improved navigation.
+
+## [0.1.112] - 2024-03-15
+
+### Fixed
+
+- 🗨️ Resolved chat malfunction after image generation.
+- 🎨 Fixed various RAG issues.
+- 🧪 Rectified experimental broken GGUF upload logic.
+
 ## [0.1.111] - 2024-03-10
 
 ### Added
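
The 0.1.114 entry above describes subscribing to new-user sign-up events via a webhook URL. Purely as an illustration (this is not the project's backend/utils/webhook.py, whose contents are not shown here; the URL is a placeholder and the payload shape is an assumption), a plain-text notification to a Google Chat or Microsoft Teams incoming webhook might look like:

    import requests

    WEBHOOK_URL = "https://chat.googleapis.com/v1/spaces/XXXX/messages?key=YYYY"  # placeholder

    def notify_signup(username: str):
        # Google Chat and Teams incoming webhooks accept a simple {"text": ...} JSON body
        requests.post(WEBHOOK_URL, json={"text": f"New user signed up: {username}"})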

+ 12 - 5
Dockerfile

@@ -2,6 +2,8 @@
 # Initialize device type args
 # use build args in the docker build command with --build-arg="BUILDARG=true"
 ARG USE_CUDA=false
+ARG USE_CUDA_VER=cu121
+ARG USE_EMBEDDING_MODEL=all-MiniLM-L6-v2
 ARG USE_MPS=false
 ARG INCLUDE_OLLAMA=false
 
@@ -28,8 +30,9 @@ RUN npm run build
 ######## WebUI backend ########
 FROM python:3.11-slim-bookworm as base
 
-# Use args
 ARG USE_CUDA
+ARG USE_CUDA_VER
+ARG USE_EMBEDDING_MODEL
 ARG USE_MPS
 ARG INCLUDE_OLLAMA
 
@@ -39,7 +42,9 @@ ENV ENV=prod \
     # pass build args to the build
     INCLUDE_OLLAMA_DOCKER=${INCLUDE_OLLAMA} \
     USE_MPS_DOCKER=${USE_MPS} \
-    USE_CUDA_DOCKER=${USE_CUDA}
+    USE_CUDA_DOCKER=${USE_CUDA} \
+    USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
+    USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL}
 
 ## Basis URL Config ##
 ENV OLLAMA_BASE_URL="/ollama" \
@@ -61,7 +66,7 @@ ENV WHISPER_MODEL="base" \
 # Leaderboard: https://huggingface.co/spaces/mteb/leaderboard 
 # for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
 # IMPORTANT: If you change the default model (all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
-ENV RAG_EMBEDDING_MODEL="all-MiniLM-L6-v2" \
+ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
     RAG_EMBEDDING_MODEL_DIR="/app/backend/data/cache/embedding/models" \
     SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models" \
     # device type for whisper tts and embedding models - "cpu" (default) or "mps" (apple silicon) - choosing this right can lead to better performance
@@ -78,8 +83,10 @@ WORKDIR /app/backend
 COPY ./backend/requirements.txt ./requirements.txt
 
 RUN if [ "$USE_CUDA" = "true" ]; then \
-        pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 --no-cache-dir && \
+        pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
         pip3 install -r requirements.txt --no-cache-dir; \
+        python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
+        python -c "import os; from chromadb.utils import embedding_functions; sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=os.environ['RAG_EMBEDDING_MODEL'], device='cpu')"; \
     elif [ "$USE_MPS" = "true" ]; then \
         pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
         pip3 install -r requirements.txt --no-cache-dir && \
@@ -131,4 +138,4 @@ COPY ./backend .
 
 EXPOSE 8080
 
-CMD [ "bash", "start.sh"]
+CMD [ "bash", "start.sh"]
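
The two new `python -c` steps in the CUDA branch above pre-download models at build time so they are baked into the image. Written out as an equivalent plain-Python sketch (using the same environment variables this Dockerfile defines), they amount to:

    import os
    from faster_whisper import WhisperModel
    from chromadb.utils import embedding_functions

    # Pre-download the Whisper model into the image's cache directory
    WhisperModel(
        os.environ["WHISPER_MODEL"],              # "base" by default
        device="cpu",
        compute_type="int8",
        download_root=os.environ["WHISPER_MODEL_DIR"],
    )

    # Pre-download the sentence-transformers embedding model used for RAG
    embedding_functions.SentenceTransformerEmbeddingFunction(
        model_name=os.environ["RAG_EMBEDDING_MODEL"],  # set from the USE_EMBEDDING_MODEL build arg
        device="cpu",
    )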

+ 2 - 0
Makefile

@@ -8,6 +8,8 @@ remove:
 
 start:
 	@docker-compose start
+startAndBuild: 
+	docker-compose up -d --build
 
 stop:
 	@docker-compose stop

+ 3 - 1
README.md

@@ -11,7 +11,7 @@
 [![Discord](https://img.shields.io/badge/Discord-Open_WebUI-blue?logo=discord&logoColor=white)](https://discord.gg/5rJgQTnV4s)
 [![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/tjbck)
 
-User-friendly WebUI for LLMs, supported LLM runners include Ollama and OpenAI-compatible APIs. For more information, be sure to check out our [Open WebUI Documentation](https://docs.openwebui.com/).
+Open WebUI is an extensible, feature-rich, and user-friendly self-hosted WebUI designed to operate entirely offline. It supports various LLM runners, including Ollama and OpenAI-compatible APIs. For more information, be sure to check out our [Open WebUI Documentation](https://docs.openwebui.com/).
 
 ![Open WebUI Demo](./demo.gif)
 
@@ -79,6 +79,8 @@ User-friendly WebUI for LLMs, supported LLM runners include Ollama and OpenAI-co
 
 - 🔒 **Backend Reverse Proxy Support**: Bolster security through direct communication between Open WebUI backend and Ollama. This key feature eliminates the need to expose Ollama over LAN. Requests made to the '/ollama/api' route from the web UI are seamlessly redirected to Ollama from the backend, enhancing overall system security.
 
+- 🌐🌍 **Multilingual Support**: Experience Open WebUI in your preferred language with our internationalization (i18n) support. Join us in expanding our supported languages! We're actively seeking contributors!
+
 - 🌟 **Continuous Updates**: We are committed to improving Open WebUI with regular updates and new features.
 
 ## 🔗 Also Check Out Open WebUI Community!

+ 19 - 5
backend/apps/audio/main.py

@@ -1,4 +1,5 @@
 import os
+import logging
 from fastapi import (
     FastAPI,
     Request,
@@ -21,11 +22,24 @@ from utils.utils import (
 )
 from utils.misc import calculate_sha256
 
-from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL, WHISPER_MODEL_DIR, DEVICE_TYPE
+from config import (
+    SRC_LOG_LEVELS,
+    CACHE_DIR,
+    UPLOAD_DIR,
+    WHISPER_MODEL,
+    WHISPER_MODEL_DIR,
+    DEVICE_TYPE,
+)
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["AUDIO"])
+
+whisper_device_type = DEVICE_TYPE
 
-if DEVICE_TYPE != "cuda":
+if whisper_device_type != "cuda":
     whisper_device_type = "cpu"
 
+log.info(f"whisper_device_type: {whisper_device_type}")
 
 app = FastAPI()
 app.add_middleware(
@@ -42,7 +56,7 @@ def transcribe(
     file: UploadFile = File(...),
     user=Depends(get_current_user),
 ):
-    print(file.content_type)
+    log.info(f"file.content_type: {file.content_type}")
 
     if file.content_type not in ["audio/mpeg", "audio/wav"]:
         raise HTTPException(
@@ -66,7 +80,7 @@ def transcribe(
         )
 
         segments, info = model.transcribe(file_path, beam_size=5)
-        print(
+        log.info(
             "Detected language '%s' with probability %f"
             % (info.language, info.language_probability)
         )
@@ -76,7 +90,7 @@ def transcribe(
         return {"text": transcript.strip()}
 
     except Exception as e:
-        print(e)
+        log.exception(e)
 
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,

+ 116 - 19
backend/apps/images/main.py

@@ -18,6 +18,8 @@ from utils.utils import (
     get_current_user,
     get_admin_user,
 )
+
+from apps.images.utils.comfyui import ImageGenerationPayload, comfyui_generate_image
 from utils.misc import calculate_sha256
 from typing import Optional
 from pydantic import BaseModel
@@ -25,9 +27,13 @@ from pathlib import Path
 import uuid
 import base64
 import json
+import logging
+
+from config import SRC_LOG_LEVELS, CACHE_DIR, AUTOMATIC1111_BASE_URL, COMFYUI_BASE_URL
 
-from config import CACHE_DIR, AUTOMATIC1111_BASE_URL
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["IMAGES"])
 
 IMAGE_CACHE_DIR = Path(CACHE_DIR).joinpath("./image/generations/")
 IMAGE_CACHE_DIR.mkdir(parents=True, exist_ok=True)
@@ -49,6 +55,8 @@ app.state.MODEL = ""
 
 
 app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
+app.state.COMFYUI_BASE_URL = COMFYUI_BASE_URL
+
 
 app.state.IMAGE_SIZE = "512x512"
 app.state.IMAGE_STEPS = 50
@@ -71,32 +79,48 @@ async def update_config(form_data: ConfigUpdateForm, user=Depends(get_admin_user
     return {"engine": app.state.ENGINE, "enabled": app.state.ENABLED}
 
 
-class UrlUpdateForm(BaseModel):
-    url: str
+class EngineUrlUpdateForm(BaseModel):
+    AUTOMATIC1111_BASE_URL: Optional[str] = None
+    COMFYUI_BASE_URL: Optional[str] = None
 
 
 @app.get("/url")
-async def get_automatic1111_url(user=Depends(get_admin_user)):
-    return {"AUTOMATIC1111_BASE_URL": app.state.AUTOMATIC1111_BASE_URL}
+async def get_engine_url(user=Depends(get_admin_user)):
+    return {
+        "AUTOMATIC1111_BASE_URL": app.state.AUTOMATIC1111_BASE_URL,
+        "COMFYUI_BASE_URL": app.state.COMFYUI_BASE_URL,
+    }
 
 
 @app.post("/url/update")
-async def update_automatic1111_url(
-    form_data: UrlUpdateForm, user=Depends(get_admin_user)
+async def update_engine_url(
+    form_data: EngineUrlUpdateForm, user=Depends(get_admin_user)
 ):
 
-    if form_data.url == "":
+    if form_data.AUTOMATIC1111_BASE_URL == None:
         app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
     else:
-        url = form_data.url.strip("/")
+        url = form_data.AUTOMATIC1111_BASE_URL.strip("/")
         try:
             r = requests.head(url)
             app.state.AUTOMATIC1111_BASE_URL = url
         except Exception as e:
             raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
 
+    if form_data.COMFYUI_BASE_URL == None:
+        app.state.COMFYUI_BASE_URL = COMFYUI_BASE_URL
+    else:
+        url = form_data.COMFYUI_BASE_URL.strip("/")
+
+        try:
+            r = requests.head(url)
+            app.state.COMFYUI_BASE_URL = url
+        except Exception as e:
+            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
+
     return {
         "AUTOMATIC1111_BASE_URL": app.state.AUTOMATIC1111_BASE_URL,
+        "COMFYUI_BASE_URL": app.state.COMFYUI_BASE_URL,
         "status": True,
     }
 
@@ -186,6 +210,18 @@ def get_models(user=Depends(get_current_user)):
                 {"id": "dall-e-2", "name": "DALL·E 2"},
                 {"id": "dall-e-3", "name": "DALL·E 3"},
             ]
+        elif app.state.ENGINE == "comfyui":
+
+            r = requests.get(url=f"{app.state.COMFYUI_BASE_URL}/object_info")
+            info = r.json()
+
+            return list(
+                map(
+                    lambda model: {"id": model, "name": model},
+                    info["CheckpointLoaderSimple"]["input"]["required"]["ckpt_name"][0],
+                )
+            )
+
         else:
             r = requests.get(
                 url=f"{app.state.AUTOMATIC1111_BASE_URL}/sdapi/v1/sd-models"
@@ -207,6 +243,8 @@ async def get_default_model(user=Depends(get_admin_user)):
     try:
         if app.state.ENGINE == "openai":
             return {"model": app.state.MODEL if app.state.MODEL else "dall-e-2"}
+        elif app.state.ENGINE == "comfyui":
+            return {"model": app.state.MODEL if app.state.MODEL else ""}
         else:
             r = requests.get(url=f"{app.state.AUTOMATIC1111_BASE_URL}/sdapi/v1/options")
             options = r.json()
@@ -221,10 +259,12 @@ class UpdateModelForm(BaseModel):
 
 
 def set_model_handler(model: str):
-
     if app.state.ENGINE == "openai":
         app.state.MODEL = model
         return app.state.MODEL
+    if app.state.ENGINE == "comfyui":
+        app.state.MODEL = model
+        return app.state.MODEL
     else:
         r = requests.get(url=f"{app.state.AUTOMATIC1111_BASE_URL}/sdapi/v1/options")
         options = r.json()
@@ -268,7 +308,24 @@ def save_b64_image(b64_str):
 
         return image_id
     except Exception as e:
-        print(f"Error saving image: {e}")
+        log.error(f"Error saving image: {e}")
+        return None
+
+
+def save_url_image(url):
+    image_id = str(uuid.uuid4())
+    file_path = IMAGE_CACHE_DIR.joinpath(f"{image_id}.png")
+
+    try:
+        r = requests.get(url)
+        r.raise_for_status()
+
+        with open(file_path, "wb") as image_file:
+            image_file.write(r.content)
+
+        return image_id
+    except Exception as e:
+        log.exception(f"Error saving image: {e}")
         return None
 
 
@@ -278,6 +335,8 @@ def generate_image(
     user=Depends(get_current_user),
 ):
 
+    width, height = tuple(map(int, app.state.IMAGE_SIZE.split("x")))
+
     r = None
     try:
         if app.state.ENGINE == "openai":
@@ -293,6 +352,7 @@ def generate_image(
                 "size": form_data.size if form_data.size else app.state.IMAGE_SIZE,
                 "response_format": "b64_json",
             }
+
             r = requests.post(
                 url=f"https://api.openai.com/v1/images/generations",
                 json=data,
@@ -300,7 +360,6 @@ def generate_image(
             )
 
             r.raise_for_status()
-
             res = r.json()
 
             images = []
@@ -315,12 +374,47 @@ def generate_image(
 
             return images
 
+        elif app.state.ENGINE == "comfyui":
+
+            data = {
+                "prompt": form_data.prompt,
+                "width": width,
+                "height": height,
+                "n": form_data.n,
+            }
+
+            if app.state.IMAGE_STEPS != None:
+                data["steps"] = app.state.IMAGE_STEPS
+
+            if form_data.negative_prompt != None:
+                data["negative_prompt"] = form_data.negative_prompt
+
+            data = ImageGenerationPayload(**data)
+
+            res = comfyui_generate_image(
+                app.state.MODEL,
+                data,
+                user.id,
+                app.state.COMFYUI_BASE_URL,
+            )
+            log.debug(f"res: {res}")
+
+            images = []
+
+            for image in res["data"]:
+                image_id = save_url_image(image["url"])
+                images.append({"url": f"/cache/image/generations/{image_id}.png"})
+                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_id}.json")
+
+                with open(file_body_path, "w") as f:
+                    json.dump(data.model_dump(exclude_none=True), f)
+
+            log.debug(f"images: {images}")
+            return images
         else:
             if form_data.model:
                 set_model_handler(form_data.model)
 
-            width, height = tuple(map(int, app.state.IMAGE_SIZE.split("x")))
-
             data = {
                 "prompt": form_data.prompt,
                 "batch_size": form_data.n,
@@ -341,7 +435,7 @@ def generate_image(
 
             res = r.json()
 
-            print(res)
+            log.debug(f"res: {res}")
 
             images = []
 
@@ -356,7 +450,10 @@ def generate_image(
             return images
 
     except Exception as e:
-        print(e)
-        if r:
-            print(r.json())
-        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
+        error = e
+
+        if r != None:
+            data = r.json()
+            if "error" in data:
+                error = data["error"]["message"]
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(error))
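
For reference, a minimal admin-side sketch of the new engine URL update route (the WebUI address, mount prefix, token, and engine URLs below are placeholders; only the JSON field names come from the EngineUrlUpdateForm in this diff):

    import requests

    requests.post(
        "http://localhost:8080/images/api/v1/url/update",   # prefix depends on how the images app is mounted
        json={
            "AUTOMATIC1111_BASE_URL": "http://localhost:7860",
            "COMFYUI_BASE_URL": "http://localhost:8188",
        },
        headers={"Authorization": "Bearer <admin-token>"},
    )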

+ 234 - 0
backend/apps/images/utils/comfyui.py

@@ -0,0 +1,234 @@
+import websocket  # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
+import uuid
+import json
+import urllib.request
+import urllib.parse
+import random
+import logging
+
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["COMFYUI"])
+
+from pydantic import BaseModel
+
+from typing import Optional
+
+COMFYUI_DEFAULT_PROMPT = """
+{
+  "3": {
+    "inputs": {
+      "seed": 0,
+      "steps": 20,
+      "cfg": 8,
+      "sampler_name": "euler",
+      "scheduler": "normal",
+      "denoise": 1,
+      "model": [
+        "4",
+        0
+      ],
+      "positive": [
+        "6",
+        0
+      ],
+      "negative": [
+        "7",
+        0
+      ],
+      "latent_image": [
+        "5",
+        0
+      ]
+    },
+    "class_type": "KSampler",
+    "_meta": {
+      "title": "KSampler"
+    }
+  },
+  "4": {
+    "inputs": {
+      "ckpt_name": "model.safetensors"
+    },
+    "class_type": "CheckpointLoaderSimple",
+    "_meta": {
+      "title": "Load Checkpoint"
+    }
+  },
+  "5": {
+    "inputs": {
+      "width": 512,
+      "height": 512,
+      "batch_size": 1
+    },
+    "class_type": "EmptyLatentImage",
+    "_meta": {
+      "title": "Empty Latent Image"
+    }
+  },
+  "6": {
+    "inputs": {
+      "text": "Prompt",
+      "clip": [
+        "4",
+        1
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "7": {
+    "inputs": {
+      "text": "Negative Prompt",
+      "clip": [
+        "4",
+        1
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "8": {
+    "inputs": {
+      "samples": [
+        "3",
+        0
+      ],
+      "vae": [
+        "4",
+        2
+      ]
+    },
+    "class_type": "VAEDecode",
+    "_meta": {
+      "title": "VAE Decode"
+    }
+  },
+  "9": {
+    "inputs": {
+      "filename_prefix": "ComfyUI",
+      "images": [
+        "8",
+        0
+      ]
+    },
+    "class_type": "SaveImage",
+    "_meta": {
+      "title": "Save Image"
+    }
+  }
+}
+"""
+
+
+def queue_prompt(prompt, client_id, base_url):
+    log.info("queue_prompt")
+    p = {"prompt": prompt, "client_id": client_id}
+    data = json.dumps(p).encode("utf-8")
+    req = urllib.request.Request(f"{base_url}/prompt", data=data)
+    return json.loads(urllib.request.urlopen(req).read())
+
+
+def get_image(filename, subfolder, folder_type, base_url):
+    log.info("get_image")
+    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
+    url_values = urllib.parse.urlencode(data)
+    with urllib.request.urlopen(f"{base_url}/view?{url_values}") as response:
+        return response.read()
+
+
+def get_image_url(filename, subfolder, folder_type, base_url):
+    log.info("get_image")
+    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
+    url_values = urllib.parse.urlencode(data)
+    return f"{base_url}/view?{url_values}"
+
+
+def get_history(prompt_id, base_url):
+    log.info("get_history")
+    with urllib.request.urlopen(f"{base_url}/history/{prompt_id}") as response:
+        return json.loads(response.read())
+
+
+def get_images(ws, prompt, client_id, base_url):
+    prompt_id = queue_prompt(prompt, client_id, base_url)["prompt_id"]
+    output_images = []
+    while True:
+        out = ws.recv()
+        if isinstance(out, str):
+            message = json.loads(out)
+            if message["type"] == "executing":
+                data = message["data"]
+                if data["node"] is None and data["prompt_id"] == prompt_id:
+                    break  # Execution is done
+        else:
+            continue  # previews are binary data
+
+    history = get_history(prompt_id, base_url)[prompt_id]
+    for o in history["outputs"]:
+        for node_id in history["outputs"]:
+            node_output = history["outputs"][node_id]
+            if "images" in node_output:
+                for image in node_output["images"]:
+                    url = get_image_url(
+                        image["filename"], image["subfolder"], image["type"], base_url
+                    )
+                    output_images.append({"url": url})
+    return {"data": output_images}
+
+
+class ImageGenerationPayload(BaseModel):
+    prompt: str
+    negative_prompt: Optional[str] = ""
+    steps: Optional[int] = None
+    seed: Optional[int] = None
+    width: int
+    height: int
+    n: int = 1
+
+
+def comfyui_generate_image(
+    model: str, payload: ImageGenerationPayload, client_id, base_url
+):
+    host = base_url.replace("http://", "").replace("https://", "")
+
+    comfyui_prompt = json.loads(COMFYUI_DEFAULT_PROMPT)
+
+    comfyui_prompt["4"]["inputs"]["ckpt_name"] = model
+    comfyui_prompt["5"]["inputs"]["batch_size"] = payload.n
+    comfyui_prompt["5"]["inputs"]["width"] = payload.width
+    comfyui_prompt["5"]["inputs"]["height"] = payload.height
+
+    # set the text prompt for our positive CLIPTextEncode
+    comfyui_prompt["6"]["inputs"]["text"] = payload.prompt
+    comfyui_prompt["7"]["inputs"]["text"] = payload.negative_prompt
+
+    if payload.steps:
+        comfyui_prompt["3"]["inputs"]["steps"] = payload.steps
+
+    comfyui_prompt["3"]["inputs"]["seed"] = (
+        payload.seed if payload.seed else random.randint(0, 18446744073709551614)
+    )
+
+    try:
+        ws = websocket.WebSocket()
+        ws.connect(f"ws://{host}/ws?clientId={client_id}")
+        log.info("WebSocket connection established.")
+    except Exception as e:
+        log.exception(f"Failed to connect to WebSocket server: {e}")
+        return None
+
+    try:
+        images = get_images(ws, comfyui_prompt, client_id, base_url)
+    except Exception as e:
+        log.exception(f"Error while receiving images: {e}")
+        images = None
+
+    ws.close()
+
+    return images
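
A minimal usage sketch of the module above (the ComfyUI address, checkpoint name, and client id are placeholder values; the call signature and payload fields come from the code in this diff):

    from apps.images.utils.comfyui import ImageGenerationPayload, comfyui_generate_image

    payload = ImageGenerationPayload(
        prompt="a watercolor fox in a misty forest",
        negative_prompt="blurry, low quality",
        width=512,
        height=512,
        n=1,
        steps=20,
    )

    res = comfyui_generate_image(
        "sd_v1-5.safetensors",        # a ckpt_name reported by ComfyUI's /object_info
        payload,
        "example-client-id",          # used for the ws://.../ws?clientId=... connection
        "http://localhost:8188",      # base_url of the ComfyUI server
    )
    # Expected shape on success: {"data": [{"url": "http://localhost:8188/view?..."}]}
    # Returns None if the WebSocket connection or image retrieval fails.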

+ 67 - 8
backend/apps/litellm/main.py

@@ -1,10 +1,27 @@
+import logging
+
 from litellm.proxy.proxy_server import ProxyConfig, initialize
 from litellm.proxy.proxy_server import app
 
-from fastapi import FastAPI, Request, Depends, status
+from fastapi import FastAPI, Request, Depends, status, Response
 from fastapi.responses import JSONResponse
+
+from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
+from starlette.responses import StreamingResponse
+import json
+
 from utils.utils import get_http_authorization_cred, get_current_user
-from config import ENV
+from config import SRC_LOG_LEVELS, ENV
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["LITELLM"])
+
+
+from config import (
+    MODEL_FILTER_ENABLED,
+    MODEL_FILTER_LIST,
+)
+
 
 proxy_config = ProxyConfig()
 
@@ -26,16 +43,58 @@ async def on_startup():
     await startup()
 
 
+app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
+app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST
+
+
 @app.middleware("http")
 async def auth_middleware(request: Request, call_next):
     auth_header = request.headers.get("Authorization", "")
+    request.state.user = None
 
-    if ENV != "dev":
-        try:
-            user = get_current_user(get_http_authorization_cred(auth_header))
-            print(user)
-        except Exception as e:
-            return JSONResponse(status_code=400, content={"detail": str(e)})
+    try:
+        user = get_current_user(get_http_authorization_cred(auth_header))
+        log.debug(f"user: {user}")
+        request.state.user = user
+    except Exception as e:
+        return JSONResponse(status_code=400, content={"detail": str(e)})
 
     response = await call_next(request)
     return response
+
+
+class ModifyModelsResponseMiddleware(BaseHTTPMiddleware):
+    async def dispatch(
+        self, request: Request, call_next: RequestResponseEndpoint
+    ) -> Response:
+
+        response = await call_next(request)
+        user = request.state.user
+
+        if "/models" in request.url.path:
+            if isinstance(response, StreamingResponse):
+                # Read the content of the streaming response
+                body = b""
+                async for chunk in response.body_iterator:
+                    body += chunk
+
+                data = json.loads(body.decode("utf-8"))
+
+                if app.state.MODEL_FILTER_ENABLED:
+                    if user and user.role == "user":
+                        data["data"] = list(
+                            filter(
+                                lambda model: model["id"]
+                                in app.state.MODEL_FILTER_LIST,
+                                data["data"],
+                            )
+                        )
+
+                # Modified Flag
+                data["modified"] = True
+                return JSONResponse(content=data)
+
+        return response
+
+
+app.add_middleware(ModifyModelsResponseMiddleware)
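
The whitelist filtering this middleware applies to /models responses, isolated as a small sketch (the model ids below are made-up examples):

    MODEL_FILTER_LIST = ["gpt-3.5-turbo", "mistral-7b"]   # hypothetical whitelist

    data = {
        "data": [
            {"id": "gpt-3.5-turbo"},
            {"id": "gpt-4"},
            {"id": "mistral-7b"},
        ]
    }

    # For non-admin users, keep only whitelisted models (mirrors the filter above)
    data["data"] = [m for m in data["data"] if m["id"] in MODEL_FILTER_LIST]
    # -> [{"id": "gpt-3.5-turbo"}, {"id": "mistral-7b"}]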

+ 318 - 52
backend/apps/ollama/main.py

@@ -1,24 +1,49 @@
-from fastapi import FastAPI, Request, Response, HTTPException, Depends, status
+from fastapi import (
+    FastAPI,
+    Request,
+    Response,
+    HTTPException,
+    Depends,
+    status,
+    UploadFile,
+    File,
+    BackgroundTasks,
+)
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse
 from fastapi.concurrency import run_in_threadpool
 
 from pydantic import BaseModel, ConfigDict
 
+import os
+import copy
 import random
 import requests
 import json
 import uuid
 import aiohttp
 import asyncio
+import logging
+from urllib.parse import urlparse
+from typing import Optional, List, Union
+
 
 from apps.web.models.users import Users
 from constants import ERROR_MESSAGES
 from utils.utils import decode_token, get_current_user, get_admin_user
-from config import OLLAMA_BASE_URLS, MODEL_FILTER_ENABLED, MODEL_FILTER_LIST
 
-from typing import Optional, List, Union
 
+from config import (
+    SRC_LOG_LEVELS,
+    OLLAMA_BASE_URLS,
+    MODEL_FILTER_ENABLED,
+    MODEL_FILTER_LIST,
+    UPLOAD_DIR,
+)
+from utils.misc import calculate_sha256
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
 
 app = FastAPI()
 app.add_middleware(
@@ -69,7 +94,7 @@ class UrlUpdateForm(BaseModel):
 async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
     app.state.OLLAMA_BASE_URLS = form_data.urls
 
-    print(app.state.OLLAMA_BASE_URLS)
+    log.info(f"app.state.OLLAMA_BASE_URLS: {app.state.OLLAMA_BASE_URLS}")
     return {"OLLAMA_BASE_URLS": app.state.OLLAMA_BASE_URLS}
 
 
@@ -90,7 +115,7 @@ async def fetch_url(url):
                 return await response.json()
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.error(f"Connection error: {e}")
         return None
 
 
@@ -98,13 +123,14 @@ def merge_models_lists(model_lists):
     merged_models = {}
 
     for idx, model_list in enumerate(model_lists):
-        for model in model_list:
-            digest = model["digest"]
-            if digest not in merged_models:
-                model["urls"] = [idx]
-                merged_models[digest] = model
-            else:
-                merged_models[digest]["urls"].append(idx)
+        if model_list is not None:
+            for model in model_list:
+                digest = model["digest"]
+                if digest not in merged_models:
+                    model["urls"] = [idx]
+                    merged_models[digest] = model
+                else:
+                    merged_models[digest]["urls"].append(idx)
 
     return list(merged_models.values())
 
@@ -113,16 +139,16 @@ def merge_models_lists(model_lists):
 
 
 async def get_all_models():
-    print("get_all_models")
+    log.info("get_all_models()")
     tasks = [fetch_url(f"{url}/api/tags") for url in app.state.OLLAMA_BASE_URLS]
     responses = await asyncio.gather(*tasks)
-    responses = list(filter(lambda x: x is not None, responses))
 
     models = {
         "models": merge_models_lists(
-            map(lambda response: response["models"], responses)
+            map(lambda response: response["models"] if response else None, responses)
         )
     }
+
     app.state.MODELS = {model["model"]: model for model in models["models"]}
 
     return models
@@ -154,7 +180,7 @@ async def get_ollama_tags(
 
             return r.json()
         except Exception as e:
-            print(e)
+            log.exception(e)
             error_detail = "Open WebUI: Server Connection Error"
             if r is not None:
                 try:
@@ -181,11 +207,17 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
         responses = await asyncio.gather(*tasks)
         responses = list(filter(lambda x: x is not None, responses))
 
-        lowest_version = min(
-            responses, key=lambda x: tuple(map(int, x["version"].split(".")))
-        )
+        if len(responses) > 0:
+            lowest_version = min(
+                responses, key=lambda x: tuple(map(int, x["version"].split(".")))
+            )
 
-        return {"version": lowest_version["version"]}
+            return {"version": lowest_version["version"]}
+        else:
+            raise HTTPException(
+                status_code=500,
+                detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
+            )
     else:
         url = app.state.OLLAMA_BASE_URLS[url_idx]
         try:
@@ -194,7 +226,7 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
 
             return r.json()
         except Exception as e:
-            print(e)
+            log.exception(e)
             error_detail = "Open WebUI: Server Connection Error"
             if r is not None:
                 try:
@@ -220,18 +252,33 @@ async def pull_model(
     form_data: ModelNameForm, url_idx: int = 0, user=Depends(get_admin_user)
 ):
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     r = None
 
     def get_request():
         nonlocal url
         nonlocal r
+
+        request_id = str(uuid.uuid4())
         try:
+            REQUEST_POOL.append(request_id)
 
             def stream_content():
-                for chunk in r.iter_content(chunk_size=8192):
-                    yield chunk
+                try:
+                    yield json.dumps({"id": request_id, "done": False}) + "\n"
+
+                    for chunk in r.iter_content(chunk_size=8192):
+                        if request_id in REQUEST_POOL:
+                            yield chunk
+                        else:
+                            log.warning("User: canceled request")
+                            break
+                finally:
+                    if hasattr(r, "close"):
+                        r.close()
+                        if request_id in REQUEST_POOL:
+                            REQUEST_POOL.remove(request_id)
 
             r = requests.request(
                 method="POST",
@@ -252,8 +299,9 @@ async def pull_model(
 
     try:
         return await run_in_threadpool(get_request)
+
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -292,7 +340,7 @@ async def push_model(
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.debug(f"url: {url}")
 
     r = None
 
@@ -324,7 +372,7 @@ async def push_model(
     try:
         return await run_in_threadpool(get_request)
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -352,9 +400,9 @@ class CreateModelForm(BaseModel):
 async def create_model(
     form_data: CreateModelForm, url_idx: int = 0, user=Depends(get_admin_user)
 ):
-    print(form_data)
+    log.debug(f"form_data: {form_data}")
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     r = None
 
@@ -376,7 +424,7 @@ async def create_model(
 
             r.raise_for_status()
 
-            print(r)
+            log.debug(f"r: {r}")
 
             return StreamingResponse(
                 stream_content(),
@@ -389,7 +437,7 @@ async def create_model(
     try:
         return await run_in_threadpool(get_request)
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -427,7 +475,7 @@ async def copy_model(
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     try:
         r = requests.request(
@@ -437,11 +485,11 @@ async def copy_model(
         )
         r.raise_for_status()
 
-        print(r.text)
+        log.debug(f"r.text: {r.text}")
 
         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -474,7 +522,7 @@ async def delete_model(
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     try:
         r = requests.request(
@@ -484,11 +532,11 @@ async def delete_model(
         )
         r.raise_for_status()
 
-        print(r.text)
+        log.debug(f"r.text: {r.text}")
 
         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -514,7 +562,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_use
 
     url_idx = random.choice(app.state.MODELS[form_data.name]["urls"])
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     try:
         r = requests.request(
@@ -526,7 +574,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_use
 
         return r.json()
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -566,7 +614,7 @@ async def generate_embeddings(
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     try:
         r = requests.request(
@@ -578,7 +626,7 @@ async def generate_embeddings(
 
         return r.json()
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -622,11 +670,11 @@ async def generate_completion(
         else:
             raise HTTPException(
                 status_code=400,
-                detail="error_detail",
+                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     r = None
 
@@ -647,7 +695,7 @@ async def generate_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -702,7 +750,7 @@ class GenerateChatCompletionForm(BaseModel):
     format: Optional[str] = None
     options: Optional[dict] = None
     template: Optional[str] = None
-    stream: Optional[bool] = True
+    stream: Optional[bool] = None
     keep_alive: Optional[Union[int, str]] = None
 
 
@@ -724,11 +772,15 @@ async def generate_chat_completion(
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     r = None
 
-    print(form_data.model_dump_json(exclude_none=True).encode())
+    log.debug(
+        "form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
+            form_data.model_dump_json(exclude_none=True).encode()
+        )
+    )
 
     def get_request():
         nonlocal form_data
@@ -747,7 +799,7 @@ async def generate_chat_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -770,7 +822,7 @@ async def generate_chat_completion(
                 headers=dict(r.headers),
             )
         except Exception as e:
-            print(e)
+            log.exception(e)
             raise e
 
     try:
@@ -824,7 +876,7 @@ async def generate_openai_chat_completion(
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    print(url)
+    log.info(f"url: {url}")
 
     r = None
 
@@ -847,7 +899,7 @@ async def generate_openai_chat_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -890,6 +942,220 @@ async def generate_openai_chat_completion(
         )
 
 
+class UrlForm(BaseModel):
+    url: str
+
+
+class UploadBlobForm(BaseModel):
+    filename: str
+
+
+def parse_huggingface_url(hf_url):
+    try:
+        # Parse the URL
+        parsed_url = urlparse(hf_url)
+
+        # Get the path and split it into components
+        path_components = parsed_url.path.split("/")
+
+        # Extract the desired output
+        user_repo = "/".join(path_components[1:3])
+        model_file = path_components[-1]
+
+        return model_file
+    except ValueError:
+        return None
+
+
+async def download_file_stream(
+    ollama_url, file_url, file_path, file_name, chunk_size=1024 * 1024
+):
+    done = False
+
+    if os.path.exists(file_path):
+        current_size = os.path.getsize(file_path)
+    else:
+        current_size = 0
+
+    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}
+
+    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout
+
+    async with aiohttp.ClientSession(timeout=timeout) as session:
+        async with session.get(file_url, headers=headers) as response:
+            total_size = int(response.headers.get("content-length", 0)) + current_size
+
+            with open(file_path, "ab+") as file:
+                async for data in response.content.iter_chunked(chunk_size):
+                    current_size += len(data)
+                    file.write(data)
+
+                    done = current_size == total_size
+                    progress = round((current_size / total_size) * 100, 2)
+
+                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'
+
+                if done:
+                    file.seek(0)
+                    hashed = calculate_sha256(file)
+                    file.seek(0)
+
+                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
+                    response = requests.post(url, data=file)
+
+                    if response.ok:
+                        res = {
+                            "done": done,
+                            "blob": f"sha256:{hashed}",
+                            "name": file_name,
+                        }
+                        os.remove(file_path)
+
+                        yield f"data: {json.dumps(res)}\n\n"
+                    else:
+                        raise "Ollama: Could not create blob, Please try again."
+
+
+# def number_generator():
+#     for i in range(1, 101):
+#         yield f"data: {i}\n"
+
+
+# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
+@app.post("/models/download")
+@app.post("/models/download/{url_idx}")
+async def download_model(
+    form_data: UrlForm,
+    url_idx: Optional[int] = None,
+):
+
+    allowed_hosts = ["https://huggingface.co/", "https://github.com/"]
+
+    if not any(form_data.url.startswith(host) for host in allowed_hosts):
+        raise HTTPException(
+            status_code=400,
+            detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
+        )
+
+    if url_idx == None:
+        url_idx = 0
+    url = app.state.OLLAMA_BASE_URLS[url_idx]
+
+    file_name = parse_huggingface_url(form_data.url)
+
+    if file_name:
+        file_path = f"{UPLOAD_DIR}/{file_name}"
+
+        return StreamingResponse(
+            download_file_stream(url, form_data.url, file_path, file_name),
+        )
+    else:
+        return None
+
+
+@app.post("/models/upload")
+@app.post("/models/upload/{url_idx}")
+def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
+    if url_idx == None:
+        url_idx = 0
+    ollama_url = app.state.OLLAMA_BASE_URLS[url_idx]
+
+    file_path = f"{UPLOAD_DIR}/{file.filename}"
+
+    # Save file in chunks
+    with open(file_path, "wb+") as f:
+        for chunk in file.file:
+            f.write(chunk)
+
+    def file_process_stream():
+        nonlocal ollama_url
+        total_size = os.path.getsize(file_path)
+        chunk_size = 1024 * 1024
+        try:
+            with open(file_path, "rb") as f:
+                total = 0
+                done = False
+
+                while not done:
+                    chunk = f.read(chunk_size)
+                    if not chunk:
+                        done = True
+                        continue
+
+                    total += len(chunk)
+                    progress = round((total / total_size) * 100, 2)
+
+                    res = {
+                        "progress": progress,
+                        "total": total_size,
+                        "completed": total,
+                    }
+                    yield f"data: {json.dumps(res)}\n\n"
+
+                if done:
+                    f.seek(0)
+                    hashed = calculate_sha256(f)
+                    f.seek(0)
+
+                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
+                    response = requests.post(url, data=f)
+
+                    if response.ok:
+                        res = {
+                            "done": done,
+                            "blob": f"sha256:{hashed}",
+                            "name": file.filename,
+                        }
+                        os.remove(file_path)
+                        yield f"data: {json.dumps(res)}\n\n"
+                    else:
+                        raise Exception(
+                            "Ollama: Could not create blob, Please try again."
+                        )
+
+        except Exception as e:
+            res = {"error": str(e)}
+            yield f"data: {json.dumps(res)}\n\n"
+
+    return StreamingResponse(file_process_stream(), media_type="text/event-stream")
+
+
+# async def upload_model(file: UploadFile = File(), url_idx: Optional[int] = None):
+#     if url_idx == None:
+#         url_idx = 0
+#     url = app.state.OLLAMA_BASE_URLS[url_idx]
+
+#     file_location = os.path.join(UPLOAD_DIR, file.filename)
+#     total_size = file.size
+
+#     async def file_upload_generator(file):
+#         print(file)
+#         try:
+#             async with aiofiles.open(file_location, "wb") as f:
+#                 completed_size = 0
+#                 while True:
+#                     chunk = await file.read(1024*1024)
+#                     if not chunk:
+#                         break
+#                     await f.write(chunk)
+#                     completed_size += len(chunk)
+#                     progress = (completed_size / total_size) * 100
+
+#                     print(progress)
+#                     yield f'data: {json.dumps({"status": "uploading", "percentage": progress, "total": total_size, "completed": completed_size, "done": False})}\n'
+#         except Exception as e:
+#             print(e)
+#             yield f"data: {json.dumps({'status': 'error', 'message': str(e)})}\n"
+#         finally:
+#             await file.close()
+#             print("done")
+#             yield f'data: {json.dumps({"status": "completed", "percentage": 100, "total": total_size, "completed": completed_size, "done": True})}\n'
+
+#     return StreamingResponse(
+#         file_upload_generator(copy.deepcopy(file)), media_type="text/event-stream"
+#     )
+
+
 @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
 async def deprecated_proxy(path: str, request: Request, user=Depends(get_current_user)):
     url = app.state.OLLAMA_BASE_URLS[0]
@@ -940,7 +1206,7 @@ async def deprecated_proxy(path: str, request: Request, user=Depends(get_current
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
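
A client-side sketch for the new /models/download route added above (the WebUI address, token, and mount prefix are placeholders; the Hugging Face URL is taken from the commented example in the diff, and the route streams JSON progress events):

    import json
    import requests

    resp = requests.post(
        "http://localhost:8080/ollama/models/download",   # prefix depends on how the Ollama app is mounted
        json={
            "url": "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
        },
        headers={"Authorization": "Bearer <token>"},
        stream=True,
    )

    for line in resp.iter_lines():
        if line.startswith(b"data: "):
            event = json.loads(line[len(b"data: "):])
            # progress events: {"progress": ..., "completed": ..., "total": ...}
            # final event:     {"done": true, "blob": "sha256:...", "name": "..."}
            print(event)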

+ 46 - 21
backend/apps/openai/main.py

@@ -6,6 +6,7 @@ import requests
 import aiohttp
 import asyncio
 import json
+import logging
 
 from pydantic import BaseModel
 
@@ -19,6 +20,7 @@ from utils.utils import (
     get_admin_user,
 )
 from config import (
+    SRC_LOG_LEVELS,
     OPENAI_API_BASE_URLS,
     OPENAI_API_KEYS,
     CACHE_DIR,
@@ -31,6 +33,9 @@ from typing import List, Optional
 import hashlib
 from pathlib import Path
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["OPENAI"])
+
 app = FastAPI()
 app.add_middleware(
     CORSMiddleware,
@@ -111,6 +116,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
         headers["Authorization"] = f"Bearer {app.state.OPENAI_API_KEYS[idx]}"
         headers["Content-Type"] = "application/json"
 
+        r = None
         try:
             r = requests.post(
                 url=f"{app.state.OPENAI_API_BASE_URLS[idx]}/audio/speech",
@@ -133,7 +139,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
             return FileResponse(file_path)
 
         except Exception as e:
-            print(e)
+            log.exception(e)
             error_detail = "Open WebUI: Server Connection Error"
             if r is not None:
                 try:
@@ -143,7 +149,9 @@ async def speech(request: Request, user=Depends(get_verified_user)):
                 except:
                     error_detail = f"External: {e}"
 
-            raise HTTPException(status_code=r.status_code, detail=error_detail)
+            raise HTTPException(
+                status_code=r.status_code if r else 500, detail=error_detail
+            )
 
     except ValueError:
         raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND)
@@ -157,7 +165,7 @@ async def fetch_url(url, key):
                 return await response.json()
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.error(f"Connection error: {e}")
         return None
 
 
@@ -165,20 +173,21 @@ def merge_models_lists(model_lists):
     merged_list = []
 
     for idx, models in enumerate(model_lists):
-        merged_list.extend(
-            [
-                {**model, "urlIdx": idx}
-                for model in models
-                if "api.openai.com" not in app.state.OPENAI_API_BASE_URLS[idx]
-                or "gpt" in model["id"]
-            ]
-        )
+        if models is not None and "error" not in models:
+            merged_list.extend(
+                [
+                    {**model, "urlIdx": idx}
+                    for model in models
+                    if "api.openai.com" not in app.state.OPENAI_API_BASE_URLS[idx]
+                    or "gpt" in model["id"]
+                ]
+            )
 
     return merged_list
 
 
 async def get_all_models():
-    print("get_all_models")
+    log.info("get_all_models()")
 
     if len(app.state.OPENAI_API_KEYS) == 1 and app.state.OPENAI_API_KEYS[0] == "":
         models = {"data": []}
@@ -187,15 +196,24 @@ async def get_all_models():
             fetch_url(f"{url}/models", app.state.OPENAI_API_KEYS[idx])
             for idx, url in enumerate(app.state.OPENAI_API_BASE_URLS)
         ]
+
         responses = await asyncio.gather(*tasks)
-        responses = list(
-            filter(lambda x: x is not None and "error" not in x, responses)
-        )
         models = {
             "data": merge_models_lists(
-                list(map(lambda response: response["data"], responses))
+                list(
+                    map(
+                        lambda response: (
+                            response["data"]
+                            if response and "data" in response
+                            else None
+                        ),
+                        responses,
+                    )
+                )
             )
         }
+
+        log.info(f"models: {models}")
         app.state.MODELS = {model["id"]: model for model in models["data"]}
 
         return models
@@ -218,6 +236,9 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use
         return models
     else:
         url = app.state.OPENAI_API_BASE_URLS[url_idx]
+
+        r = None
+
         try:
             r = requests.request(method="GET", url=f"{url}/models")
             r.raise_for_status()
@@ -230,7 +251,7 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use
 
             return response_data
         except Exception as e:
-            print(e)
+            log.exception(e)
             error_detail = "Open WebUI: Server Connection Error"
             if r is not None:
                 try:
@@ -264,7 +285,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
         if body.get("model") == "gpt-4-vision-preview":
             if "max_tokens" not in body:
                 body["max_tokens"] = 4000
-            print("Modified body_dict:", body)
+            log.debug("Modified body_dict:", body)
 
         # Fix for ChatGPT calls failing because the num_ctx key is in body
         if "num_ctx" in body:
@@ -276,7 +297,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
         # Convert the modified body back to JSON
         body = json.dumps(body)
     except json.JSONDecodeError as e:
-        print("Error loading request body into a dictionary:", e)
+        log.error("Error loading request body into a dictionary:", e)
 
     url = app.state.OPENAI_API_BASE_URLS[idx]
     key = app.state.OPENAI_API_KEYS[idx]
@@ -290,6 +311,8 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
     headers["Authorization"] = f"Bearer {key}"
     headers["Content-Type"] = "application/json"
 
+    r = None
+
     try:
         r = requests.request(
             method=request.method,
@@ -312,7 +335,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
             response_data = r.json()
             return response_data
     except Exception as e:
-        print(e)
+        log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -322,4 +345,6 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
             except:
                 error_detail = f"External: {e}"
 
-        raise HTTPException(status_code=r.status_code, detail=error_detail)
+        raise HTTPException(
+            status_code=r.status_code if r else 500, detail=error_detail
+        )

+ 152 - 85
backend/apps/rag/main.py

@@ -8,7 +8,7 @@ from fastapi import (
     Form,
 )
 from fastapi.middleware.cors import CORSMiddleware
-import os, shutil
+import os, shutil, logging
 
 from pathlib import Path
 from typing import List
@@ -21,6 +21,7 @@ from langchain_community.document_loaders import (
     TextLoader,
     PyPDFLoader,
     CSVLoader,
+    BSHTMLLoader,
     Docx2txtLoader,
     UnstructuredEPubLoader,
     UnstructuredWordDocumentLoader,
@@ -54,6 +55,7 @@ from utils.misc import (
 )
 from utils.utils import get_current_user, get_admin_user
 from config import (
+    SRC_LOG_LEVELS,
     UPLOAD_DIR,
     DOCS_DIR,
     RAG_EMBEDDING_MODEL,
@@ -66,6 +68,9 @@ from config import (
 
 from constants import ERROR_MESSAGES
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
 #
 # if RAG_EMBEDDING_MODEL:
 #    sentence_transformer_ef = SentenceTransformer(
@@ -111,39 +116,6 @@ class StoreWebForm(CollectionNameForm):
     url: str
 
 
-def store_data_in_vector_db(data, collection_name, overwrite: bool = False) -> bool:
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=app.state.CHUNK_SIZE, chunk_overlap=app.state.CHUNK_OVERLAP
-    )
-    docs = text_splitter.split_documents(data)
-
-    texts = [doc.page_content for doc in docs]
-    metadatas = [doc.metadata for doc in docs]
-
-    try:
-        if overwrite:
-            for collection in CHROMA_CLIENT.list_collections():
-                if collection_name == collection.name:
-                    print(f"deleting existing collection {collection_name}")
-                    CHROMA_CLIENT.delete_collection(name=collection_name)
-
-        collection = CHROMA_CLIENT.create_collection(
-            name=collection_name,
-            embedding_function=app.state.sentence_transformer_ef,
-        )
-
-        collection.add(
-            documents=texts, metadatas=metadatas, ids=[str(uuid.uuid1()) for _ in texts]
-        )
-        return True
-    except Exception as e:
-        print(e)
-        if e.__class__.__name__ == "UniqueConstraintError":
-            return True
-
-        return False
-
-
 @app.get("/")
 async def get_status():
     return {
@@ -273,7 +245,7 @@ def query_doc_handler(
             embedding_function=app.state.sentence_transformer_ef,
         )
     except Exception as e:
-        print(e)
+        log.exception(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(e),
@@ -317,13 +289,69 @@ def store_web(form_data: StoreWebForm, user=Depends(get_current_user)):
             "filename": form_data.url,
         }
     except Exception as e:
-        print(e)
+        log.exception(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(e),
         )
 
 
+def store_data_in_vector_db(data, collection_name, overwrite: bool = False) -> bool:
+
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=app.state.CHUNK_SIZE,
+        chunk_overlap=app.state.CHUNK_OVERLAP,
+        add_start_index=True,
+    )
+    docs = text_splitter.split_documents(data)
+
+    if len(docs) > 0:
+        return store_docs_in_vector_db(docs, collection_name, overwrite)
+    else:
+        raise ValueError(ERROR_MESSAGES.EMPTY_CONTENT)
+
+
+def store_text_in_vector_db(
+    text, metadata, collection_name, overwrite: bool = False
+) -> bool:
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=app.state.CHUNK_SIZE,
+        chunk_overlap=app.state.CHUNK_OVERLAP,
+        add_start_index=True,
+    )
+    docs = text_splitter.create_documents([text], metadatas=[metadata])
+    return store_docs_in_vector_db(docs, collection_name, overwrite)
+
+
+def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> bool:
+
+    texts = [doc.page_content for doc in docs]
+    metadatas = [doc.metadata for doc in docs]
+
+    try:
+        if overwrite:
+            for collection in CHROMA_CLIENT.list_collections():
+                if collection_name == collection.name:
+                    log.info(f"deleting existing collection {collection_name}")
+                    CHROMA_CLIENT.delete_collection(name=collection_name)
+
+        collection = CHROMA_CLIENT.create_collection(
+            name=collection_name,
+            embedding_function=app.state.sentence_transformer_ef,
+        )
+
+        collection.add(
+            documents=texts, metadatas=metadatas, ids=[str(uuid.uuid1()) for _ in texts]
+        )
+        return True
+    except Exception as e:
+        log.exception(e)
+        if e.__class__.__name__ == "UniqueConstraintError":
+            return True
+
+        return False
+
+
 def get_loader(filename: str, file_content_type: str, file_path: str):
     file_ext = filename.split(".")[-1].lower()
     known_type = True
@@ -381,6 +409,8 @@ def get_loader(filename: str, file_content_type: str, file_path: str):
         loader = UnstructuredRSTLoader(file_path, mode="elements")
     elif file_ext == "xml":
         loader = UnstructuredXMLLoader(file_path)
+    elif file_ext in ["htm", "html"]:
+        loader = BSHTMLLoader(file_path, open_encoding="unicode_escape")
     elif file_ext == "md":
         loader = UnstructuredMarkdownLoader(file_path)
     elif file_content_type == "application/epub+zip":
@@ -399,9 +429,9 @@ def get_loader(filename: str, file_content_type: str, file_path: str):
     elif file_ext in known_source_ext or (
         file_content_type and file_content_type.find("text/") >= 0
     ):
-        loader = TextLoader(file_path)
+        loader = TextLoader(file_path, autodetect_encoding=True)
     else:
-        loader = TextLoader(file_path)
+        loader = TextLoader(file_path, autodetect_encoding=True)
         known_type = False
 
     return loader, known_type
@@ -415,7 +445,7 @@ def store_doc(
 ):
     # "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm"
 
-    print(file.content_type)
+    log.info(f"file.content_type: {file.content_type}")
     try:
         filename = file.filename
         file_path = f"{UPLOAD_DIR}/{filename}"
@@ -431,22 +461,24 @@ def store_doc(
 
         loader, known_type = get_loader(file.filename, file.content_type, file_path)
         data = loader.load()
-        result = store_data_in_vector_db(data, collection_name)
-
-        if result:
-            return {
-                "status": True,
-                "collection_name": collection_name,
-                "filename": filename,
-                "known_type": known_type,
-            }
-        else:
+
+        try:
+            result = store_data_in_vector_db(data, collection_name)
+
+            if result:
+                return {
+                    "status": True,
+                    "collection_name": collection_name,
+                    "filename": filename,
+                    "known_type": known_type,
+                }
+        except Exception as e:
             raise HTTPException(
                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-                detail=ERROR_MESSAGES.DEFAULT(),
+                detail=ERROR_MESSAGES.DEFAULT(e),
             )
     except Exception as e:
-        print(e)
+        log.exception(e)
         if "No pandoc was found" in str(e):
             raise HTTPException(
                 status_code=status.HTTP_400_BAD_REQUEST,
@@ -459,6 +491,37 @@ def store_doc(
             )
 
 
+class TextRAGForm(BaseModel):
+    name: str
+    content: str
+    collection_name: Optional[str] = None
+
+
+@app.post("/text")
+def store_text(
+    form_data: TextRAGForm,
+    user=Depends(get_current_user),
+):
+
+    collection_name = form_data.collection_name
+    if collection_name == None:
+        collection_name = calculate_sha256_string(form_data.content)
+
+    result = store_text_in_vector_db(
+        form_data.content,
+        metadata={"name": form_data.name, "created_by": user.id},
+        collection_name=collection_name,
+    )
+
+    if result:
+        return {"status": True, "collection_name": collection_name}
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=ERROR_MESSAGES.DEFAULT(),
+        )
+
+
 @app.get("/scan")
 def scan_docs_dir(user=Depends(get_admin_user)):
     for path in Path(DOCS_DIR).rglob("./**/*"):
@@ -477,41 +540,45 @@ def scan_docs_dir(user=Depends(get_admin_user)):
                 )
                 data = loader.load()
 
-                result = store_data_in_vector_db(data, collection_name)
-
-                if result:
-                    sanitized_filename = sanitize_filename(filename)
-                    doc = Documents.get_doc_by_name(sanitized_filename)
-
-                    if doc == None:
-                        doc = Documents.insert_new_doc(
-                            user.id,
-                            DocumentForm(
-                                **{
-                                    "name": sanitized_filename,
-                                    "title": filename,
-                                    "collection_name": collection_name,
-                                    "filename": filename,
-                                    "content": (
-                                        json.dumps(
-                                            {
-                                                "tags": list(
-                                                    map(
-                                                        lambda name: {"name": name},
-                                                        tags,
+                try:
+                    result = store_data_in_vector_db(data, collection_name)
+
+                    if result:
+                        sanitized_filename = sanitize_filename(filename)
+                        doc = Documents.get_doc_by_name(sanitized_filename)
+
+                        if doc == None:
+                            doc = Documents.insert_new_doc(
+                                user.id,
+                                DocumentForm(
+                                    **{
+                                        "name": sanitized_filename,
+                                        "title": filename,
+                                        "collection_name": collection_name,
+                                        "filename": filename,
+                                        "content": (
+                                            json.dumps(
+                                                {
+                                                    "tags": list(
+                                                        map(
+                                                            lambda name: {"name": name},
+                                                            tags,
+                                                        )
                                                     )
-                                                )
-                                            }
-                                        )
-                                        if len(tags)
-                                        else "{}"
-                                    ),
-                                }
-                            ),
-                        )
+                                                }
+                                            )
+                                            if len(tags)
+                                            else "{}"
+                                        ),
+                                    }
+                                ),
+                            )
+                except Exception as e:
+                    log.exception(e)
+                    pass
 
         except Exception as e:
-            print(e)
+            log.exception(e)
 
     return True
 
@@ -532,11 +599,11 @@ def reset(user=Depends(get_admin_user)) -> bool:
             elif os.path.isdir(file_path):
                 shutil.rmtree(file_path)
         except Exception as e:
-            print("Failed to delete %s. Reason: %s" % (file_path, e))
+            log.error("Failed to delete %s. Reason: %s" % (file_path, e))
 
     try:
         CHROMA_CLIENT.reset()
     except Exception as e:
-        print(e)
+        log.exception(e)
 
     return True

+ 13 - 6
backend/apps/rag/utils.py

@@ -1,7 +1,11 @@
 import re
+import logging
 from typing import List
 
-from config import CHROMA_CLIENT
+from config import SRC_LOG_LEVELS, CHROMA_CLIENT
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 def query_doc(collection_name: str, query: str, k: int, embedding_function):
@@ -91,14 +95,13 @@ def query_collection(
 
 
 def rag_template(template: str, context: str, query: str):
-    template = re.sub(r"\[context\]", context, template)
-    template = re.sub(r"\[query\]", query, template)
-
+    template = template.replace("[context]", context)
+    template = template.replace("[query]", query)
     return template
 
 
 def rag_messages(docs, messages, template, k, embedding_function):
-    print(docs)
+    log.debug(f"docs: {docs}")
 
     last_user_message_idx = None
     for i in range(len(messages) - 1, -1, -1):
@@ -138,6 +141,8 @@ def rag_messages(docs, messages, template, k, embedding_function):
                     k=k,
                     embedding_function=embedding_function,
                 )
+            elif doc["type"] == "text":
+                context = doc["content"]
             else:
                 context = query_doc(
                     collection_name=doc["collection_name"],
@@ -146,11 +151,13 @@ def rag_messages(docs, messages, template, k, embedding_function):
                     embedding_function=embedding_function,
                 )
         except Exception as e:
-            print(e)
+            log.exception(e)
             context = None
 
         relevant_contexts.append(context)
 
+    log.debug(f"relevant_contexts: {relevant_contexts}")
+
     context_string = ""
     for context in relevant_contexts:
         if context:

+ 5 - 2
backend/apps/web/internal/db.py

@@ -1,13 +1,16 @@
 from peewee import *
-from config import DATA_DIR
+from config import SRC_LOG_LEVELS, DATA_DIR
 import os
+import logging
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["DB"])
 
 # Check if the file exists
 if os.path.exists(f"{DATA_DIR}/ollama.db"):
     # Rename the file
     os.rename(f"{DATA_DIR}/ollama.db", f"{DATA_DIR}/webui.db")
-    print("File renamed successfully.")
+    log.info("File renamed successfully.")
 else:
     pass
 

+ 2 - 0
backend/apps/web/main.py

@@ -19,6 +19,7 @@ from config import (
     DEFAULT_USER_ROLE,
     ENABLE_SIGNUP,
     USER_PERMISSIONS,
+    WEBHOOK_URL,
 )
 
 app = FastAPI()
@@ -32,6 +33,7 @@ app.state.DEFAULT_MODELS = DEFAULT_MODELS
 app.state.DEFAULT_PROMPT_SUGGESTIONS = DEFAULT_PROMPT_SUGGESTIONS
 app.state.DEFAULT_USER_ROLE = DEFAULT_USER_ROLE
 app.state.USER_PERMISSIONS = USER_PERMISSIONS
+app.state.WEBHOOK_URL = WEBHOOK_URL
 
 
 app.add_middleware(

+ 8 - 2
backend/apps/web/models/auths.py

@@ -2,6 +2,7 @@ from pydantic import BaseModel
 from typing import List, Union, Optional
 import time
 import uuid
+import logging
 from peewee import *
 
 from apps.web.models.users import UserModel, Users
@@ -9,6 +10,11 @@ from utils.utils import verify_password
 
 from apps.web.internal.db import DB
 
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
 ####################
 # DB MODEL
 ####################
@@ -86,7 +92,7 @@ class AuthsTable:
     def insert_new_auth(
         self, email: str, password: str, name: str, role: str = "pending"
     ) -> Optional[UserModel]:
-        print("insert_new_auth")
+        log.info("insert_new_auth")
 
         id = str(uuid.uuid4())
 
@@ -103,7 +109,7 @@ class AuthsTable:
             return None
 
     def authenticate_user(self, email: str, password: str) -> Optional[UserModel]:
-        print("authenticate_user", email)
+        log.info(f"authenticate_user: {email}")
         try:
             auth = Auth.get(Auth.email == email, Auth.active == True)
             if auth:

+ 0 - 14
backend/apps/web/models/chats.py

@@ -95,20 +95,6 @@ class ChatTable:
         except:
             return None
 
-    def update_chat_by_id(self, id: str, chat: dict) -> Optional[ChatModel]:
-        try:
-            query = Chat.update(
-                chat=json.dumps(chat),
-                title=chat["title"] if "title" in chat else "New Chat",
-                timestamp=int(time.time()),
-            ).where(Chat.id == id)
-            query.execute()
-
-            chat = Chat.get(Chat.id == id)
-            return ChatModel(**model_to_dict(chat))
-        except:
-            return None
-
     def get_chat_lists_by_user_id(
         self, user_id: str, skip: int = 0, limit: int = 50
     ) -> List[ChatModel]:

+ 8 - 2
backend/apps/web/models/documents.py

@@ -3,6 +3,7 @@ from peewee import *
 from playhouse.shortcuts import model_to_dict
 from typing import List, Union, Optional
 import time
+import logging
 
 from utils.utils import decode_token
 from utils.misc import get_gravatar_url
@@ -11,6 +12,11 @@ from apps.web.internal.db import DB
 
 import json
 
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
 ####################
 # Documents DB Schema
 ####################
@@ -118,7 +124,7 @@ class DocumentsTable:
             doc = Document.get(Document.name == form_data.name)
             return DocumentModel(**model_to_dict(doc))
         except Exception as e:
-            print(e)
+            log.exception(e)
             return None
 
     def update_doc_content_by_name(
@@ -138,7 +144,7 @@ class DocumentsTable:
             doc = Document.get(Document.name == name)
             return DocumentModel(**model_to_dict(doc))
         except Exception as e:
-            print(e)
+            log.exception(e)
             return None
 
     def delete_doc_by_name(self, name: str) -> bool:

+ 12 - 12
backend/apps/web/models/modelfiles.py

@@ -64,8 +64,8 @@ class ModelfilesTable:
         self.db.create_tables([Modelfile])
 
     def insert_new_modelfile(
-            self, user_id: str,
-            form_data: ModelfileForm) -> Optional[ModelfileModel]:
+        self, user_id: str, form_data: ModelfileForm
+    ) -> Optional[ModelfileModel]:
         if "tagName" in form_data.modelfile:
             modelfile = ModelfileModel(
                 **{
@@ -73,7 +73,8 @@ class ModelfilesTable:
                     "tag_name": form_data.modelfile["tagName"],
                     "modelfile": json.dumps(form_data.modelfile),
                     "timestamp": int(time.time()),
-                })
+                }
+            )
 
             try:
                 result = Modelfile.create(**modelfile.model_dump())
@@ -87,29 +88,28 @@ class ModelfilesTable:
         else:
             return None
 
-    def get_modelfile_by_tag_name(self,
-                                  tag_name: str) -> Optional[ModelfileModel]:
+    def get_modelfile_by_tag_name(self, tag_name: str) -> Optional[ModelfileModel]:
         try:
             modelfile = Modelfile.get(Modelfile.tag_name == tag_name)
             return ModelfileModel(**model_to_dict(modelfile))
         except:
             return None
 
-    def get_modelfiles(self,
-                       skip: int = 0,
-                       limit: int = 50) -> List[ModelfileResponse]:
+    def get_modelfiles(self, skip: int = 0, limit: int = 50) -> List[ModelfileResponse]:
         return [
             ModelfileResponse(
                 **{
                     **model_to_dict(modelfile),
-                    "modelfile":
-                    json.loads(modelfile.modelfile),
-                }) for modelfile in Modelfile.select()
+                    "modelfile": json.loads(modelfile.modelfile),
+                }
+            )
+            for modelfile in Modelfile.select()
             # .limit(limit).offset(skip)
         ]
 
     def update_modelfile_by_tag_name(
-            self, tag_name: str, modelfile: dict) -> Optional[ModelfileModel]:
+        self, tag_name: str, modelfile: dict
+    ) -> Optional[ModelfileModel]:
         try:
             query = Modelfile.update(
                 modelfile=json.dumps(modelfile),

+ 9 - 6
backend/apps/web/models/prompts.py

@@ -52,8 +52,9 @@ class PromptsTable:
         self.db = db
         self.db.create_tables([Prompt])
 
-    def insert_new_prompt(self, user_id: str,
-                          form_data: PromptForm) -> Optional[PromptModel]:
+    def insert_new_prompt(
+        self, user_id: str, form_data: PromptForm
+    ) -> Optional[PromptModel]:
         prompt = PromptModel(
             **{
                 "user_id": user_id,
@@ -61,7 +62,8 @@ class PromptsTable:
                 "title": form_data.title,
                 "content": form_data.content,
                 "timestamp": int(time.time()),
-            })
+            }
+        )
 
         try:
             result = Prompt.create(**prompt.model_dump())
@@ -81,13 +83,14 @@ class PromptsTable:
 
     def get_prompts(self) -> List[PromptModel]:
         return [
-            PromptModel(**model_to_dict(prompt)) for prompt in Prompt.select()
+            PromptModel(**model_to_dict(prompt))
+            for prompt in Prompt.select()
             # .limit(limit).offset(skip)
         ]
 
     def update_prompt_by_command(
-            self, command: str,
-            form_data: PromptForm) -> Optional[PromptModel]:
+        self, command: str, form_data: PromptForm
+    ) -> Optional[PromptModel]:
         try:
             query = Prompt.update(
                 title=form_data.title,

+ 10 - 4
backend/apps/web/models/tags.py

@@ -6,9 +6,15 @@ from playhouse.shortcuts import model_to_dict
 import json
 import uuid
 import time
+import logging
 
 from apps.web.internal.db import DB
 
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
 ####################
 # Tag DB Schema
 ####################
@@ -173,7 +179,7 @@ class TagTable:
                 (ChatIdTag.tag_name == tag_name) & (ChatIdTag.user_id == user_id)
             )
             res = query.execute()  # Remove the rows, return number of rows removed.
-            print(res)
+            log.debug(f"res: {res}")
 
             tag_count = self.count_chat_ids_by_tag_name_and_user_id(tag_name, user_id)
             if tag_count == 0:
@@ -185,7 +191,7 @@ class TagTable:
 
             return True
         except Exception as e:
-            print("delete_tag", e)
+            log.error(f"delete_tag: {e}")
             return False
 
     def delete_tag_by_tag_name_and_chat_id_and_user_id(
@@ -198,7 +204,7 @@ class TagTable:
                 & (ChatIdTag.user_id == user_id)
             )
             res = query.execute()  # Remove the rows, return number of rows removed.
-            print(res)
+            log.debug(f"res: {res}")
 
             tag_count = self.count_chat_ids_by_tag_name_and_user_id(tag_name, user_id)
             if tag_count == 0:
@@ -210,7 +216,7 @@ class TagTable:
 
             return True
         except Exception as e:
-            print("delete_tag", e)
+            log.error(f"delete_tag: {e}")
             return False
 
     def delete_tags_by_chat_id_and_user_id(self, chat_id: str, user_id: str) -> bool:

+ 13 - 1
backend/apps/web/routers/auths.py

@@ -27,7 +27,8 @@ from utils.utils import (
     create_token,
 )
 from utils.misc import parse_duration, validate_email_format
-from constants import ERROR_MESSAGES
+from utils.webhook import post_webhook
+from constants import ERROR_MESSAGES, WEBHOOK_MESSAGES
 
 router = APIRouter()
 
@@ -155,6 +156,17 @@ async def signup(request: Request, form_data: SignupForm):
             )
             # response.set_cookie(key='token', value=token, httponly=True)
 
+            if request.app.state.WEBHOOK_URL:
+                post_webhook(
+                    request.app.state.WEBHOOK_URL,
+                    WEBHOOK_MESSAGES.USER_SIGNUP(user.name),
+                    {
+                        "action": "signup",
+                        "message": WEBHOOK_MESSAGES.USER_SIGNUP(user.name),
+                        "user": user.model_dump_json(exclude_none=True),
+                    },
+                )
+
             return {
                 "token": token,
                 "token_type": "Bearer",

+ 8 - 2
backend/apps/web/routers/chats.py

@@ -5,6 +5,7 @@ from utils.utils import get_current_user, get_admin_user
 from fastapi import APIRouter
 from pydantic import BaseModel
 import json
+import logging
 
 from apps.web.models.users import Users
 from apps.web.models.chats import (
@@ -27,6 +28,11 @@ from apps.web.models.tags import (
 
 from constants import ERROR_MESSAGES
 
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
 router = APIRouter()
 
 ############################
@@ -78,7 +84,7 @@ async def create_new_chat(form_data: ChatForm, user=Depends(get_current_user)):
         chat = Chats.insert_new_chat(user.id, form_data)
         return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
     except Exception as e:
-        print(e)
+        log.exception(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
         )
@@ -95,7 +101,7 @@ async def get_all_tags(user=Depends(get_current_user)):
         tags = Tags.get_tags_by_user_id(user.id)
         return tags
     except Exception as e:
-        print(e)
+        log.exception(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
         )

+ 6 - 2
backend/apps/web/routers/configs.py

@@ -10,7 +10,12 @@ import uuid
 
 from apps.web.models.users import Users
 
-from utils.utils import get_password_hash, get_current_user, get_admin_user, create_token
+from utils.utils import (
+    get_password_hash,
+    get_current_user,
+    get_admin_user,
+    create_token,
+)
 from utils.misc import get_gravatar_url, validate_email_format
 from constants import ERROR_MESSAGES
 
@@ -43,7 +48,6 @@ async def set_global_default_models(
     return request.app.state.DEFAULT_MODELS
 
 
-
 @router.post("/default/suggestions", response_model=List[PromptSuggestion])
 async def set_global_default_suggestions(
     request: Request,

+ 24 - 21
backend/apps/web/routers/modelfiles.py

@@ -24,9 +24,9 @@ router = APIRouter()
 
 
 @router.get("/", response_model=List[ModelfileResponse])
-async def get_modelfiles(skip: int = 0,
-                         limit: int = 50,
-                         user=Depends(get_current_user)):
+async def get_modelfiles(
+    skip: int = 0, limit: int = 50, user=Depends(get_current_user)
+):
     return Modelfiles.get_modelfiles(skip, limit)
 
 
@@ -36,17 +36,16 @@ async def get_modelfiles(skip: int = 0,
 
 
 @router.post("/create", response_model=Optional[ModelfileResponse])
-async def create_new_modelfile(form_data: ModelfileForm,
-                               user=Depends(get_admin_user)):
+async def create_new_modelfile(form_data: ModelfileForm, user=Depends(get_admin_user)):
     modelfile = Modelfiles.insert_new_modelfile(user.id, form_data)
 
     if modelfile:
         return ModelfileResponse(
             **{
                 **modelfile.model_dump(),
-                "modelfile":
-                json.loads(modelfile.modelfile),
-            })
+                "modelfile": json.loads(modelfile.modelfile),
+            }
+        )
     else:
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
@@ -60,17 +59,18 @@ async def create_new_modelfile(form_data: ModelfileForm,
 
 
 @router.post("/", response_model=Optional[ModelfileResponse])
-async def get_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
-                                    user=Depends(get_current_user)):
+async def get_modelfile_by_tag_name(
+    form_data: ModelfileTagNameForm, user=Depends(get_current_user)
+):
     modelfile = Modelfiles.get_modelfile_by_tag_name(form_data.tag_name)
 
     if modelfile:
         return ModelfileResponse(
             **{
                 **modelfile.model_dump(),
-                "modelfile":
-                json.loads(modelfile.modelfile),
-            })
+                "modelfile": json.loads(modelfile.modelfile),
+            }
+        )
     else:
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
@@ -84,8 +84,9 @@ async def get_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
 
 
 @router.post("/update", response_model=Optional[ModelfileResponse])
-async def update_modelfile_by_tag_name(form_data: ModelfileUpdateForm,
-                                       user=Depends(get_admin_user)):
+async def update_modelfile_by_tag_name(
+    form_data: ModelfileUpdateForm, user=Depends(get_admin_user)
+):
     modelfile = Modelfiles.get_modelfile_by_tag_name(form_data.tag_name)
     if modelfile:
         updated_modelfile = {
@@ -94,14 +95,15 @@ async def update_modelfile_by_tag_name(form_data: ModelfileUpdateForm,
         }
 
         modelfile = Modelfiles.update_modelfile_by_tag_name(
-            form_data.tag_name, updated_modelfile)
+            form_data.tag_name, updated_modelfile
+        )
 
         return ModelfileResponse(
             **{
                 **modelfile.model_dump(),
-                "modelfile":
-                json.loads(modelfile.modelfile),
-            })
+                "modelfile": json.loads(modelfile.modelfile),
+            }
+        )
     else:
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
@@ -115,7 +117,8 @@ async def update_modelfile_by_tag_name(form_data: ModelfileUpdateForm,
 
 
 @router.delete("/delete", response_model=bool)
-async def delete_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
-                                       user=Depends(get_admin_user)):
+async def delete_modelfile_by_tag_name(
+    form_data: ModelfileTagNameForm, user=Depends(get_admin_user)
+):
     result = Modelfiles.delete_modelfile_by_tag_name(form_data.tag_name)
     return result

+ 7 - 1
backend/apps/web/routers/users.py

@@ -7,6 +7,7 @@ from fastapi import APIRouter
 from pydantic import BaseModel
 import time
 import uuid
+import logging
 
 from apps.web.models.users import UserModel, UserUpdateForm, UserRoleUpdateForm, Users
 from apps.web.models.auths import Auths
@@ -14,6 +15,11 @@ from apps.web.models.auths import Auths
 from utils.utils import get_current_user, get_password_hash, get_admin_user
 from constants import ERROR_MESSAGES
 
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
 router = APIRouter()
 
 ############################
@@ -83,7 +89,7 @@ async def update_user_by_id(
 
         if form_data.password:
             hashed = get_password_hash(form_data.password)
-            print(hashed)
+            log.debug(f"hashed: {hashed}")
             Auths.update_user_password_by_id(user_id, hashed)
 
         Auths.update_email_by_id(user_id, form_data.email.lower())

+ 0 - 149
backend/apps/web/routers/utils.py

@@ -21,155 +21,6 @@ from constants import ERROR_MESSAGES
 router = APIRouter()
 
 
-class UploadBlobForm(BaseModel):
-    filename: str
-
-
-from urllib.parse import urlparse
-
-
-def parse_huggingface_url(hf_url):
-    try:
-        # Parse the URL
-        parsed_url = urlparse(hf_url)
-
-        # Get the path and split it into components
-        path_components = parsed_url.path.split("/")
-
-        # Extract the desired output
-        user_repo = "/".join(path_components[1:3])
-        model_file = path_components[-1]
-
-        return model_file
-    except ValueError:
-        return None
-
-
-async def download_file_stream(url, file_path, file_name, chunk_size=1024 * 1024):
-    done = False
-
-    if os.path.exists(file_path):
-        current_size = os.path.getsize(file_path)
-    else:
-        current_size = 0
-
-    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}
-
-    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout
-
-    async with aiohttp.ClientSession(timeout=timeout) as session:
-        async with session.get(url, headers=headers) as response:
-            total_size = int(response.headers.get("content-length", 0)) + current_size
-
-            with open(file_path, "ab+") as file:
-                async for data in response.content.iter_chunked(chunk_size):
-                    current_size += len(data)
-                    file.write(data)
-
-                    done = current_size == total_size
-                    progress = round((current_size / total_size) * 100, 2)
-                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'
-
-                if done:
-                    file.seek(0)
-                    hashed = calculate_sha256(file)
-                    file.seek(0)
-
-                    url = f"{OLLAMA_BASE_URLS[0]}/blobs/sha256:{hashed}"
-                    response = requests.post(url, data=file)
-
-                    if response.ok:
-                        res = {
-                            "done": done,
-                            "blob": f"sha256:{hashed}",
-                            "name": file_name,
-                        }
-                        os.remove(file_path)
-
-                        yield f"data: {json.dumps(res)}\n\n"
-                    else:
-                        raise "Ollama: Could not create blob, Please try again."
-
-
-@router.get("/download")
-async def download(
-    url: str,
-):
-    # url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
-    file_name = parse_huggingface_url(url)
-
-    if file_name:
-        file_path = f"{UPLOAD_DIR}/{file_name}"
-
-        return StreamingResponse(
-            download_file_stream(url, file_path, file_name),
-            media_type="text/event-stream",
-        )
-    else:
-        return None
-
-
-@router.post("/upload")
-def upload(file: UploadFile = File(...)):
-    file_path = f"{UPLOAD_DIR}/{file.filename}"
-
-    # Save file in chunks
-    with open(file_path, "wb+") as f:
-        for chunk in file.file:
-            f.write(chunk)
-
-    def file_process_stream():
-        total_size = os.path.getsize(file_path)
-        chunk_size = 1024 * 1024
-        try:
-            with open(file_path, "rb") as f:
-                total = 0
-                done = False
-
-                while not done:
-                    chunk = f.read(chunk_size)
-                    if not chunk:
-                        done = True
-                        continue
-
-                    total += len(chunk)
-                    progress = round((total / total_size) * 100, 2)
-
-                    res = {
-                        "progress": progress,
-                        "total": total_size,
-                        "completed": total,
-                    }
-                    yield f"data: {json.dumps(res)}\n\n"
-
-                if done:
-                    f.seek(0)
-                    hashed = calculate_sha256(f)
-                    f.seek(0)
-
-                    url = f"{OLLAMA_BASE_URLS[0]}/blobs/sha256:{hashed}"
-                    response = requests.post(url, data=f)
-
-                    if response.ok:
-                        res = {
-                            "done": done,
-                            "blob": f"sha256:{hashed}",
-                            "name": file.filename,
-                        }
-                        os.remove(file_path)
-                        yield f"data: {json.dumps(res)}\n\n"
-                    else:
-                        raise Exception(
-                            "Ollama: Could not create blob, Please try again."
-                        )
-
-        except Exception as e:
-            res = {"error": str(e)}
-            yield f"data: {json.dumps(res)}\n\n"
-
-    return StreamingResponse(file_process_stream(), media_type="text/event-stream")
-
-
 @router.get("/gravatar")
 async def get_gravatar(
     email: str,

+ 66 - 10
backend/config.py

@@ -1,4 +1,6 @@
 import os
+import sys
+import logging
 import chromadb
 from chromadb import Settings
 from base64 import b64encode
@@ -21,9 +23,10 @@ try:
 
     load_dotenv(find_dotenv("../.env"))
 except ImportError:
-    print("dotenv not installed, skipping...")
+    log.warning("dotenv not installed, skipping...")
 
 WEBUI_NAME = "Open WebUI"
+WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
 shutil.copyfile("../build/favicon.png", "./static/favicon.png")
 
 ####################################
@@ -100,6 +103,47 @@ for version in soup.find_all("h2"):
 CHANGELOG = changelog_json
 
 
+####################################
+# LOGGING
+####################################
+log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]
+
+GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper()
+if GLOBAL_LOG_LEVEL in log_levels:
+    logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True)
+else:
+    GLOBAL_LOG_LEVEL = "INFO"
+
+log = logging.getLogger(__name__)
+log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
+
+log_sources = [
+    "AUDIO",
+    "COMFYUI",
+    "CONFIG",
+    "DB",
+    "IMAGES",
+    "LITELLM",
+    "MAIN",
+    "MODELS",
+    "OLLAMA",
+    "OPENAI",
+    "RAG",
+    "WEBHOOK",
+]
+
+SRC_LOG_LEVELS = {}
+
+for source in log_sources:
+    log_env_var = source + "_LOG_LEVEL"
+    SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper()
+    if SRC_LOG_LEVELS[source] not in log_levels:
+        SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL
+    log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}")
+
+log.setLevel(SRC_LOG_LEVELS["CONFIG"])
+
+
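+# Illustrative (hypothetical) invocation showing how these variables are intended to
+# be set; values should be one of the names in log_levels above, otherwise the
+# defaults apply:
+#   GLOBAL_LOG_LEVEL=DEBUG RAG_LOG_LEVEL=INFO ./start.sh
+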
 ####################################
 # CUSTOM_NAME
 ####################################
@@ -111,7 +155,7 @@ if CUSTOM_NAME:
         data = r.json()
         if r.ok:
             if "logo" in data:
-                url = (
+                WEBUI_FAVICON_URL = url = (
                     f"https://api.openwebui.com{data['logo']}"
                     if data["logo"][0] == "/"
                     else data["logo"]
@@ -125,7 +169,7 @@ if CUSTOM_NAME:
 
             WEBUI_NAME = data["name"]
     except Exception as e:
-        print(e)
+        log.exception(e)
         pass
 
 
@@ -194,9 +238,9 @@ def create_config_file(file_path):
 LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"
 
 if not os.path.exists(LITELLM_CONFIG_PATH):
-    print("Config file doesn't exist. Creating...")
+    log.info("Config file doesn't exist. Creating...")
     create_config_file(LITELLM_CONFIG_PATH)
-    print("Config file created successfully.")
+    log.info("Config file created successfully.")
 
 
 ####################################
@@ -209,7 +253,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
 
 OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
 INCLUDE_OLLAMA = os.environ.get("INCLUDE_OLLAMA_ENV", "false")
-
+K8S_FLAG = os.environ.get("K8S_FLAG", "")
 
 if OLLAMA_BASE_URL == "" and OLLAMA_API_BASE_URL != "":
     OLLAMA_BASE_URL = (
@@ -227,6 +271,9 @@ if ENV == "prod":
         else:    
             OLLAMA_BASE_URL = "http://host.docker.internal:11434"
 
+    elif K8S_FLAG:
+        OLLAMA_BASE_URL = "http://ollama-service.open-webui.svc.cluster.local:11434"
+
 
 OLLAMA_BASE_URLS = os.environ.get("OLLAMA_BASE_URLS", "")
 OLLAMA_BASE_URLS = OLLAMA_BASE_URLS if OLLAMA_BASE_URLS != "" else OLLAMA_BASE_URL
@@ -256,8 +303,10 @@ OPENAI_API_BASE_URLS = (
     OPENAI_API_BASE_URLS if OPENAI_API_BASE_URLS != "" else OPENAI_API_BASE_URL
 )
 
-OPENAI_API_BASE_URLS = [url.strip() for url in OPENAI_API_BASE_URLS.split(";")]
-
+OPENAI_API_BASE_URLS = [
+    url.strip() if url != "" else "https://api.openai.com/v1"
+    for url in OPENAI_API_BASE_URLS.split(";")
+]
 
 ####################################
 # WEBUI
@@ -294,13 +343,19 @@ DEFAULT_PROMPT_SUGGESTIONS = (
 
 
 DEFAULT_USER_ROLE = os.getenv("DEFAULT_USER_ROLE", "pending")
-USER_PERMISSIONS = {"chat": {"deletion": True}}
+
+USER_PERMISSIONS_CHAT_DELETION = (
+    os.environ.get("USER_PERMISSIONS_CHAT_DELETION", "True").lower() == "true"
+)
+
+USER_PERMISSIONS = {"chat": {"deletion": USER_PERMISSIONS_CHAT_DELETION}}
 
 
-MODEL_FILTER_ENABLED = os.environ.get("MODEL_FILTER_ENABLED", False)
+MODEL_FILTER_ENABLED = os.environ.get("MODEL_FILTER_ENABLED", "False").lower() == "true"
 MODEL_FILTER_LIST = os.environ.get("MODEL_FILTER_LIST", "")
 MODEL_FILTER_LIST = [model.strip() for model in MODEL_FILTER_LIST.split(";")]
 
+WEBHOOK_URL = os.environ.get("WEBHOOK_URL", "")
 
 ####################################
 # WEBUI_VERSION
@@ -385,3 +440,4 @@ WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models"
 ####################################
 
 AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "")
+COMFYUI_BASE_URL = os.getenv("COMFYUI_BASE_URL", "")

+ 11 - 1
backend/constants.py

@@ -5,6 +5,13 @@ class MESSAGES(str, Enum):
     DEFAULT = lambda msg="": f"{msg if msg else ''}"
 
 
+class WEBHOOK_MESSAGES(str, Enum):
+    DEFAULT = lambda msg="": f"{msg if msg else ''}"
+    USER_SIGNUP = lambda username="": (
+        f"New user signed up: {username}" if username else "New user signed up"
+    )
+
+
 class ERROR_MESSAGES(str, Enum):
     def __str__(self) -> str:
         return super().__str__()
@@ -46,9 +53,12 @@ class ERROR_MESSAGES(str, Enum):
 
     PANDOC_NOT_INSTALLED = "Pandoc is not installed on the server. Please contact your administrator for assistance."
     INCORRECT_FORMAT = (
-        lambda err="": f"Invalid format. Please use the correct format{err if err else ''}"
+        lambda err="": f"Invalid format. Please use the correct format{err}"
     )
     RATE_LIMIT_EXCEEDED = "API rate limit exceeded"
 
     MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"
     OPENAI_NOT_FOUND = lambda name="": f"OpenAI API was not found"
+    OLLAMA_NOT_FOUND = "WebUI could not connect to Ollama"
+
+    EMPTY_CONTENT = "The content provided is empty. Please ensure that there is text or data present before proceeding."

+ 23 - 34
backend/data/config.json

@@ -1,35 +1,24 @@
 {
-    "version": "0.0.1",
-    "ui": {
-        "prompt_suggestions": [
-            {
-                "title": [
-                    "Help me study",
-                    "vocabulary for a college entrance exam"
-                ],
-                "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."
-            },
-            {
-                "title": [
-                    "Give me ideas",
-                    "for what to do with my kids' art"
-                ],
-                "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."
-            },
-            {
-                "title": [
-                    "Tell me a fun fact",
-                    "about the Roman Empire"
-                ],
-                "content": "Tell me a random fun fact about the Roman Empire"
-            },
-            {
-                "title": [
-                    "Show me a code snippet",
-                    "of a website's sticky header"
-                ],
-                "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript."
-            }
-        ]
-    }
-}
+	"version": 0,
+	"ui": {
+		"default_locale": "en-US",
+		"prompt_suggestions": [
+			{
+				"title": ["Help me study", "vocabulary for a college entrance exam"],
+				"content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."
+			},
+			{
+				"title": ["Give me ideas", "for what to do with my kids' art"],
+				"content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."
+			},
+			{
+				"title": ["Tell me a fun fact", "about the Roman Empire"],
+				"content": "Tell me a random fun fact about the Roman Empire"
+			},
+			{
+				"title": ["Show me a code snippet", "of a website's sticky header"],
+				"content": "Show me a code snippet of a website's sticky header in CSS and JavaScript."
+			}
+		]
+	}
+}

+ 48 - 7
backend/main.py

@@ -4,6 +4,7 @@ import markdown
 import time
 import os
 import sys
+import logging
 import requests
 
 from fastapi import FastAPI, Request, Depends, status
@@ -31,6 +32,7 @@ from utils.utils import get_admin_user
 from apps.rag.utils import rag_messages
 
 from config import (
+    CONFIG_DATA,
     WEBUI_NAME,
     ENV,
     VERSION,
@@ -38,9 +40,16 @@ from config import (
     FRONTEND_BUILD_DIR,
     MODEL_FILTER_ENABLED,
     MODEL_FILTER_LIST,
+    GLOBAL_LOG_LEVEL,
+    SRC_LOG_LEVELS,
+    WEBHOOK_URL,
 )
 from constants import ERROR_MESSAGES
 
+logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
+
 
 class SPAStaticFiles(StaticFiles):
     async def get_response(self, path: str, scope):
@@ -58,6 +67,9 @@ app = FastAPI(docs_url="/docs" if ENV == "dev" else None, redoc_url=None)
 app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
 app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST
 
+app.state.WEBHOOK_URL = WEBHOOK_URL
+
+
 origins = ["*"]
 
 
@@ -66,7 +78,7 @@ class RAGMiddleware(BaseHTTPMiddleware):
         if request.method == "POST" and (
             "/api/chat" in request.url.path or "/chat/completions" in request.url.path
         ):
-            print(request.url.path)
+            log.debug(f"request.url.path: {request.url.path}")
 
             # Read the original request body
             body = await request.body()
@@ -78,7 +90,6 @@ class RAGMiddleware(BaseHTTPMiddleware):
             # Example: Add a new key-value pair or modify existing ones
             # data["modified"] = True  # Example modification
             if "docs" in data:
-
                 data = {**data}
                 data["messages"] = rag_messages(
                     data["docs"],
@@ -89,7 +100,7 @@ class RAGMiddleware(BaseHTTPMiddleware):
                 )
                 del data["docs"]
 
-                print(data["messages"])
+                log.debug(f"data['messages']: {data['messages']}")
 
             modified_body_bytes = json.dumps(data).encode("utf-8")
 
@@ -153,11 +164,18 @@ app.mount("/rag/api/v1", rag_app)
 
 @app.get("/api/config")
 async def get_app_config():
+    # Handle the absence of the 'ui' key in CONFIG_DATA
+
+    default_locale = "en-US"
+    if "ui" in CONFIG_DATA:
+        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
 
+    # The rest of the function uses the locale resolved above
     return {
         "status": True,
         "name": WEBUI_NAME,
         "version": VERSION,
+        "default_locale": default_locale,
         "images": images_app.state.ENABLED,
         "default_models": webui_app.state.DEFAULT_MODELS,
         "default_prompt_suggestions": webui_app.state.DEFAULT_PROMPT_SUGGESTIONS,
@@ -178,10 +196,9 @@ class ModelFilterConfigForm(BaseModel):
 
 
 @app.post("/api/config/model/filter")
-async def get_model_filter_config(
+async def update_model_filter_config(
     form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
 ):
-
     app.state.MODEL_FILTER_ENABLED = form_data.enabled
     app.state.MODEL_FILTER_LIST = form_data.models
 
@@ -191,15 +208,39 @@ async def get_model_filter_config(
     openai_app.state.MODEL_FILTER_ENABLED = app.state.MODEL_FILTER_ENABLED
     openai_app.state.MODEL_FILTER_LIST = app.state.MODEL_FILTER_LIST
 
+    litellm_app.state.MODEL_FILTER_ENABLED = app.state.MODEL_FILTER_ENABLED
+    litellm_app.state.MODEL_FILTER_LIST = app.state.MODEL_FILTER_LIST
+
     return {
         "enabled": app.state.MODEL_FILTER_ENABLED,
         "models": app.state.MODEL_FILTER_LIST,
     }
 
 
+@app.get("/api/webhook")
+async def get_webhook_url(user=Depends(get_admin_user)):
+    return {
+        "url": app.state.WEBHOOK_URL,
+    }
+
+
+class UrlForm(BaseModel):
+    url: str
+
+
+@app.post("/api/webhook")
+async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
+    app.state.WEBHOOK_URL = form_data.url
+
+    webui_app.state.WEBHOOK_URL = app.state.WEBHOOK_URL
+
+    return {
+        "url": app.state.WEBHOOK_URL,
+    }
+
+
 @app.get("/api/version")
 async def get_app_config():
-
     return {
         "version": VERSION,
     }
@@ -207,7 +248,7 @@ async def get_app_config():
 
 @app.get("/api/changelog")
 async def get_app_changelog():
-    return CHANGELOG
+    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
 
 
 @app.get("/api/version/updates")

+ 1 - 0
backend/requirements.txt

@@ -45,3 +45,4 @@ PyJWT
 pyjwt[crypto]
 
 black
+langfuse

+ 5 - 0
backend/start.sh

@@ -28,4 +28,9 @@ if [ "$INCLUDE_OLLAMA" = "true" ]; then
     ollama serve &
 fi
 
+if [ "$USE_CUDA_DOCKER" = "true" ]; then
+    echo "CUDA is enabled, appending LD_LIBRARY_PATH to include torch/cudnn & cublas libraries."
+    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
+fi
+
 WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host 0.0.0.0 --port "$PORT" --forwarded-allow-ips '*'

+ 54 - 0
backend/utils/webhook.py

@@ -0,0 +1,54 @@
+import json
+import requests
+import logging
+
+from config import SRC_LOG_LEVELS, VERSION, WEBUI_FAVICON_URL, WEBUI_NAME
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["WEBHOOK"])
+
+
+def post_webhook(url: str, message: str, event_data: dict) -> bool:
+    try:
+        payload = {}
+
+        # Slack and Google Chat Webhooks
+        if "https://hooks.slack.com" in url or "https://chat.googleapis.com" in url:
+            payload["text"] = message
+        # Discord Webhooks
+        elif "https://discord.com/api/webhooks" in url:
+            payload["content"] = message
+        # Microsoft Teams Webhooks
+        elif "webhook.office.com" in url:
+            action = event_data.get("action", "undefined")
+            facts = [
+                {"name": name, "value": value}
+                for name, value in json.loads(event_data.get("user", {})).items()
+            ]
+            payload = {
+                "@type": "MessageCard",
+                "@context": "http://schema.org/extensions",
+                "themeColor": "0076D7",
+                "summary": message,
+                "sections": [
+                    {
+                        "activityTitle": message,
+                        "activitySubtitle": f"{WEBUI_NAME} ({VERSION}) - {action}",
+                        "activityImage": WEBUI_FAVICON_URL,
+                        "facts": facts,
+                        "markdown": True,
+                    }
+                ],
+            }
+        # Default Payload
+        else:
+            payload = {**event_data}
+
+        log.debug(f"payload: {payload}")
+        r = requests.post(url, json=payload)
+        r.raise_for_status()
+        log.debug(f"r.text: {r.text}")
+        return True
+    except Exception as e:
+        log.exception(e)
+        return False
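
A hedged usage sketch for `post_webhook` (the URL, user, and event fields are invented for illustration). Note that the Microsoft Teams branch calls `json.loads()` on `event_data["user"]`, so callers are expected to pass that field as a JSON string rather than a dict:

```python
import json
from utils.webhook import post_webhook  # import path as laid out in this repo's backend/

# Hypothetical sign-up notification; values below are placeholders.
ok = post_webhook(
    "https://hooks.slack.com/services/T000/B000/XXXX",   # placeholder webhook URL
    "New user signed up: Ada Lovelace",
    {
        "action": "signup",
        # Passed as a JSON string because the Teams branch json.loads() this field.
        "user": json.dumps({"name": "Ada Lovelace", "email": "ada@example.com"}),
    },
)
print("delivered" if ok else "delivery failed")
```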

BIN
demo.gif


+ 12 - 0
docs/CONTRIBUTING.md

@@ -50,6 +50,18 @@ We welcome pull requests. Before submitting one, please:
 
 Help us make Open WebUI more accessible by improving documentation, writing tutorials, or creating guides on setting up and optimizing the web UI.
 
+### 🌐 Translations and Internationalization
+
+Help us make Open WebUI available to a wider audience. In this section, we'll guide you through the process of adding new translations to the project.
+
+We use JSON files to store translations. You can find the existing translation files in the `src/lib/i18n/locales` directory. Each directory corresponds to a specific language, for example, `en-US` for English (US), `fr-FR` for French (France), and so on. You can refer to [ISO 639 Language Codes](http://www.lingoes.net/en/translator/langcode.htm) to find the appropriate code for a specific language.
+
+To add a new language:
+
+- Create a new directory in the `src/lib/i18n/locales` path with the appropriate language code as its name. For instance, if you're adding translations for Spanish (Spain), create a new directory named `es-ES`.
+- Copy the American English translation file(s) (from the `en-US` directory in `src/lib/i18n/locales`) to this new directory and update the string values in JSON format according to your language. Make sure to preserve the structure of the JSON object.
+- Add the language code and its respective title to the languages file at `src/lib/i18n/locales/languages.json`.
+
 ### 🤔 Questions & Feedback
 
 Got questions or feedback? Join our [Discord community](https://discord.gg/5rJgQTnV4s) or open an issue. We're here to help!
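
To complement the translation steps above, a small sanity check (a hypothetical helper, not part of this commit) that diffs a new locale against `en-US` to spot missing keys; the `translation.json` filename follows the i18next-parser output path configured later in this diff:

```python
import json
from pathlib import Path

LOCALES = Path("src/lib/i18n/locales")

def missing_keys(locale: str) -> set[str]:
    """Return translation keys present in en-US but absent from the given locale."""
    reference = json.loads((LOCALES / "en-US" / "translation.json").read_text())
    candidate = json.loads((LOCALES / locale / "translation.json").read_text())
    return set(reference) - set(candidate)

print(missing_keys("es-ES"))  # ideally an empty set
```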

+ 38 - 0
i18next-parser.config.ts

@@ -0,0 +1,38 @@
+// i18next-parser.config.ts
+import { getLanguages } from './src/lib/i18n/index.ts';
+
+const getLangCodes = async () => {
+	const languages = await getLanguages();
+	return languages.map((l) => l.code);
+};
+
+export default {
+	contextSeparator: '_',
+	createOldCatalogs: false,
+	defaultNamespace: 'translation',
+	defaultValue: '',
+	indentation: 2,
+	keepRemoved: false,
+	keySeparator: false,
+	lexers: {
+		svelte: ['JavascriptLexer'],
+		js: ['JavascriptLexer'],
+		ts: ['JavascriptLexer'],
+
+		default: ['JavascriptLexer']
+	},
+	lineEnding: 'auto',
+	locales: await getLangCodes(),
+	namespaceSeparator: false,
+	output: 'src/lib/i18n/locales/$LOCALE/$NAMESPACE.json',
+	pluralSeparator: '_',
+	input: 'src/**/*.{js,svelte}',
+	sort: true,
+	verbose: true,
+	failOnWarnings: false,
+	failOnUpdate: false,
+	customValueTemplate: null,
+	resetDefaultValueLocale: null,
+	i18nextOptions: null,
+	yamlOptions: null
+};

+ 1 - 1
kubernetes/helm/templates/ollama-statefulset.yaml

@@ -88,7 +88,7 @@ spec:
       resources:
         requests:
           storage: {{ .Values.ollama.persistence.size | quote }}
-      storageClass: {{ .Values.ollama.persistence.storageClass }}
+      storageClassName: {{ .Values.ollama.persistence.storageClass }}
       {{- with .Values.ollama.persistence.selector }}
       selector:
         {{- toYaml . | nindent 8 }}

+ 1 - 1
kubernetes/helm/templates/webui-pvc.yaml

@@ -17,7 +17,7 @@ spec:
   resources:
     requests:
       storage: {{ .Values.webui.persistence.size }}
-  storageClass: {{ .Values.webui.persistence.storageClass }}
+  storageClassName: {{ .Values.webui.persistence.storageClass }}
   {{- with .Values.webui.persistence.selector }}
   selector:
     {{- toYaml . | nindent 4 }}

+ 11 - 6
kubernetes/helm/templates/webui-service.yaml

@@ -4,6 +4,9 @@ metadata:
   name: {{ include "open-webui.name" . }}
   labels:
     {{- include "open-webui.labels" . | nindent 4 }}
+    {{- with .Values.webui.service.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
   {{- with .Values.webui.service.annotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
@@ -11,14 +14,16 @@ metadata:
 spec:
   selector:
     {{- include "open-webui.selectorLabels" . | nindent 4 }}
-{{- with .Values.webui.service }}
-  type: {{ .type }}
+  type: {{ .Values.webui.service.type | default "ClusterIP" }}
   ports:
   - protocol: TCP
     name: http
-    port: {{ .port }}
+    port: {{ .Values.webui.service.port }}
     targetPort: http
-    {{- if .nodePort }}
-    nodePort: {{ .nodePort | int }}
+    {{- if .Values.webui.service.nodePort }}
+    nodePort: {{ .Values.webui.service.nodePort | int }}
     {{- end }}
-{{- end }}
+  {{- if .Values.webui.service.loadBalancerClass }}
+  loadBalancerClass: {{ .Values.webui.service.loadBalancerClass | quote }}
+  {{- end }}
+

+ 2 - 0
kubernetes/helm/values.yaml

@@ -70,3 +70,5 @@ webui:
     port: 80
     containerPort: 8080
     nodePort: ""
+    labels: {}
+    loadBalancerClass: "" 

+ 1 - 1
kubernetes/manifest/base/webui-deployment.yaml

@@ -35,4 +35,4 @@ spec:
       volumes:
       - name: webui-volume
         persistentVolumeClaim:
-          claimName: ollama-webui-pvc          
+          claimName: open-webui-pvc          

+ 2 - 2
kubernetes/manifest/base/webui-pvc.yaml

@@ -2,8 +2,8 @@ apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
   labels:
-    app: ollama-webui
-  name: ollama-webui-pvc
+    app: open-webui
+  name: open-webui-pvc
   namespace: open-webui
 spec:
   accessModes: ["ReadWriteOnce"]

These changes were not shown because the diff is too large
+ 2529 - 2511
package-lock.json


+ 9 - 3
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.1.111",
+	"version": "0.1.116",
 	"private": true,
 	"scripts": {
 		"dev": "vite dev --host",
@@ -13,7 +13,8 @@
 		"lint:types": "npm run check",
 		"lint:backend": "pylint backend/",
 		"format": "prettier --plugin-search-dir --write '**/*.{js,ts,svelte,css,md,html,json}'",
-		"format:backend": "yapf --recursive backend -p -i"
+		"format:backend": "black . --exclude \"/venv/\"",
+		"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write 'src/lib/i18n/**/*.{js,json}'"
 	},
 	"devDependencies": {
 		"@sveltejs/adapter-auto": "^2.0.0",
@@ -27,6 +28,7 @@
 		"eslint": "^8.56.0",
 		"eslint-config-prettier": "^8.5.0",
 		"eslint-plugin-svelte": "^2.30.0",
+		"i18next-parser": "^8.13.0",
 		"postcss": "^8.4.31",
 		"prettier": "^2.8.0",
 		"prettier-plugin-svelte": "^2.10.1",
@@ -42,9 +44,13 @@
 	"dependencies": {
 		"@sveltejs/adapter-node": "^1.3.1",
 		"async": "^3.2.5",
+		"bits-ui": "^0.19.7",
 		"dayjs": "^1.11.10",
 		"file-saver": "^2.0.5",
 		"highlight.js": "^11.9.0",
+		"i18next": "^23.10.0",
+		"i18next-browser-languagedetector": "^7.2.0",
+		"i18next-resources-to-backend": "^1.2.0",
 		"idb": "^7.1.1",
 		"js-sha256": "^0.10.1",
 		"katex": "^0.16.9",
@@ -53,4 +59,4 @@
 		"tippy.js": "^6.3.7",
 		"uuid": "^9.0.1"
 	}
-}
+}

+ 4 - 0
src/app.css

@@ -78,3 +78,7 @@ select {
 	/* for Chrome */
 	-webkit-appearance: none;
 }
+
+.katex-mathml {
+	display: none;
+}

+ 32 - 11
src/app.html

@@ -8,18 +8,39 @@
 		<meta name="robots" content="noindex,nofollow" />
 		<script>
 			// On page load or when changing themes, best to add inline in `head` to avoid FOUC
-			if (
-				localStorage.theme === 'light' ||
-				(!('theme' in localStorage) && window.matchMedia('(prefers-color-scheme: light)').matches)
-			) {
-				document.documentElement.classList.add('light');
-			} else if (localStorage.theme) {
-				localStorage.theme.split(' ').forEach((e) => {
-					document.documentElement.classList.add(e);
+			(() => {
+				if (localStorage?.theme && localStorage?.theme.includes('oled')) {
+					document.documentElement.style.setProperty('--color-gray-900', '#000000');
+					document.documentElement.style.setProperty('--color-gray-950', '#000000');
+					document.documentElement.classList.add('dark');
+				} else if (
+					localStorage.theme === 'light' ||
+					(!('theme' in localStorage) && window.matchMedia('(prefers-color-scheme: light)').matches)
+				) {
+					document.documentElement.classList.add('light');
+				} else if (localStorage.theme && localStorage.theme !== 'system') {
+					localStorage.theme.split(' ').forEach((e) => {
+						document.documentElement.classList.add(e);
+					});
+				} else if (localStorage.theme && localStorage.theme === 'system') {
+					systemTheme = window.matchMedia('(prefers-color-scheme: dark)').matches;
+					document.documentElement.classList.add(systemTheme ? 'dark' : 'light');
+				} else {
+					document.documentElement.classList.add('dark');
+				}
+
+				window.matchMedia('(prefers-color-scheme: dark)').addListener((e) => {
+					if (localStorage.theme === 'system') {
+						if (e.matches) {
+							document.documentElement.classList.add('dark');
+							document.documentElement.classList.remove('light');
+						} else {
+							document.documentElement.classList.add('light');
+							document.documentElement.classList.remove('dark');
+						}
+					}
 				});
-			} else {
-				document.documentElement.classList.add('dark');
-			}
+			})();
 		</script>
 
 		%sveltekit.head%

+ 5 - 5
src/lib/apis/images/index.ts

@@ -139,7 +139,7 @@ export const updateOpenAIKey = async (token: string = '', key: string) => {
 	return res.OPENAI_API_KEY;
 };
 
-export const getAUTOMATIC1111Url = async (token: string = '') => {
+export const getImageGenerationEngineUrls = async (token: string = '') => {
 	let error = null;
 
 	const res = await fetch(`${IMAGES_API_BASE_URL}/url`, {
@@ -168,10 +168,10 @@ export const getAUTOMATIC1111Url = async (token: string = '') => {
 		throw error;
 	}
 
-	return res.AUTOMATIC1111_BASE_URL;
+	return res;
 };
 
-export const updateAUTOMATIC1111Url = async (token: string = '', url: string) => {
+export const updateImageGenerationEngineUrls = async (token: string = '', urls: object = {}) => {
 	let error = null;
 
 	const res = await fetch(`${IMAGES_API_BASE_URL}/url/update`, {
@@ -182,7 +182,7 @@ export const updateAUTOMATIC1111Url = async (token: string = '', url: string) =>
 			...(token && { authorization: `Bearer ${token}` })
 		},
 		body: JSON.stringify({
-			url: url
+			...urls
 		})
 	})
 		.then(async (res) => {
@@ -203,7 +203,7 @@ export const updateAUTOMATIC1111Url = async (token: string = '', url: string) =>
 		throw error;
 	}
 
-	return res.AUTOMATIC1111_BASE_URL;
+	return res;
 };
 
 export const getImageSize = async (token: string = '') => {

+ 57 - 0
src/lib/apis/index.ts

@@ -139,3 +139,60 @@ export const updateModelFilterConfig = async (
 
 	return res;
 };
+
+export const getWebhookUrl = async (token: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_BASE_URL}/api/webhook`, {
+		method: 'GET',
+		headers: {
+			'Content-Type': 'application/json',
+			Authorization: `Bearer ${token}`
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = err;
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res.url;
+};
+
+export const updateWebhookUrl = async (token: string, url: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_BASE_URL}/api/webhook`, {
+		method: 'POST',
+		headers: {
+			'Content-Type': 'application/json',
+			Authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			url: url
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			error = err;
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res.url;
+};

+ 1 - 1
src/lib/apis/litellm/index.ts

@@ -33,7 +33,7 @@ export const getLiteLLMModels = async (token: string = '') => {
 					id: model.id,
 					name: model.name ?? model.id,
 					external: true,
-					source: 'litellm'
+					source: 'LiteLLM'
 				}))
 				.sort((a, b) => {
 					return a.name.localeCompare(b.name);

+ 68 - 1
src/lib/apis/ollama/index.ts

@@ -271,7 +271,7 @@ export const generateChatCompletion = async (token: string = '', body: object) =
 	return [res, controller];
 };
 
-export const cancelChatCompletion = async (token: string = '', requestId: string) => {
+export const cancelOllamaRequest = async (token: string = '', requestId: string) => {
 	let error = null;
 
 	const res = await fetch(`${OLLAMA_API_BASE_URL}/cancel/${requestId}`, {
@@ -390,6 +390,73 @@ export const pullModel = async (token: string, tagName: string, urlIdx: string |
 	return res;
 };
 
+export const downloadModel = async (
+	token: string,
+	download_url: string,
+	urlIdx: string | null = null
+) => {
+	let error = null;
+
+	const res = await fetch(
+		`${OLLAMA_API_BASE_URL}/models/download${urlIdx !== null ? `/${urlIdx}` : ''}`,
+		{
+			method: 'POST',
+			headers: {
+				Accept: 'application/json',
+				'Content-Type': 'application/json',
+				Authorization: `Bearer ${token}`
+			},
+			body: JSON.stringify({
+				url: download_url
+			})
+		}
+	).catch((err) => {
+		console.log(err);
+		error = err;
+
+		if ('detail' in err) {
+			error = err.detail;
+		}
+
+		return null;
+	});
+	if (error) {
+		throw error;
+	}
+	return res;
+};
+
+export const uploadModel = async (token: string, file: File, urlIdx: string | null = null) => {
+	let error = null;
+
+	const formData = new FormData();
+	formData.append('file', file);
+
+	const res = await fetch(
+		`${OLLAMA_API_BASE_URL}/models/upload${urlIdx !== null ? `/${urlIdx}` : ''}`,
+		{
+			method: 'POST',
+			headers: {
+				Authorization: `Bearer ${token}`
+			},
+			body: formData
+		}
+	).catch((err) => {
+		console.log(err);
+		error = err;
+
+		if ('detail' in err) {
+			error = err.detail;
+		}
+
+		return null;
+	});
+	if (error) {
+		throw error;
+	}
+	return res;
+};
+
 // export const pullModel = async (token: string, tagName: string) => {
 // 	return await fetch(`${OLLAMA_API_BASE_URL}/pull`, {
 // 		method: 'POST',
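
`downloadModel` and `uploadModel` above target new backend routes under the Ollama proxy prefix. A rough sketch of calling them from a script; the base URL, token, and file names are assumptions, and the actual prefix is whatever `OLLAMA_API_BASE_URL` resolves to in `src/lib/constants.ts`:

```python
import requests

BASE = "http://localhost:8080/ollama/api"  # assumption; mirror OLLAMA_API_BASE_URL from src/lib/constants.ts
HEADERS = {"Authorization": "Bearer <token>"}  # placeholder user token

# Ask the backend to fetch a GGUF file by URL (mirrors downloadModel above).
requests.post(
    f"{BASE}/models/download",
    headers=HEADERS,
    json={"url": "https://example.com/model.gguf"},  # placeholder model URL
)

# Upload a local model file as multipart form data (mirrors uploadModel above).
with open("model.gguf", "rb") as f:
    requests.post(f"{BASE}/models/upload", headers=HEADERS, files={"file": f})
```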

+ 50 - 0
src/lib/apis/openai/index.ts

@@ -263,3 +263,53 @@ export const synthesizeOpenAISpeech = async (
 
 	return res;
 };
+
+export const generateTitle = async (
+	token: string = '',
+	template: string,
+	model: string,
+	prompt: string,
+	url: string = OPENAI_API_BASE_URL
+) => {
+	let error = null;
+
+	template = template.replace(/{{prompt}}/g, prompt);
+
+	console.log(template);
+
+	const res = await fetch(`${url}/chat/completions`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			Authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			model: model,
+			messages: [
+				{
+					role: 'user',
+					content: template
+				}
+			],
+			stream: false
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			console.log(err);
+			if ('detail' in err) {
+				error = err.detail;
+			}
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res?.choices[0]?.message?.content ?? 'New Chat';
+};

+ 7 - 2
src/lib/components/AddFilesPlaceholder.svelte

@@ -1,8 +1,13 @@
+<script>
+	import { getContext } from 'svelte';
+	const i18n = getContext('i18n');
+</script>
+
 <div class="  text-center text-6xl mb-3">📄</div>
-<div class="text-center dark:text-white text-2xl font-semibold z-50">Add Files</div>
+<div class="text-center dark:text-white text-2xl font-semibold z-50">{$i18n.t('Add Files')}</div>
 
 <slot
 	><div class=" mt-2 text-center text-sm dark:text-gray-200 w-full">
-		Drop any files here to add to the conversation
+		{$i18n.t('Drop any files here to add to the conversation')}
 	</div>
 </slot>

+ 7 - 4
src/lib/components/ChangelogModal.svelte

@@ -1,5 +1,5 @@
 <script lang="ts">
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 	import { Confetti } from 'svelte-confetti';
 
 	import { WEBUI_NAME, config } from '$lib/stores';
@@ -9,6 +9,8 @@
 
 	import Modal from './common/Modal.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let show = false;
 
 	let changelog = null;
@@ -23,7 +25,8 @@
 	<div class="px-5 py-4 dark:text-gray-300">
 		<div class="flex justify-between items-start">
 			<div class="text-xl font-bold">
-				What’s New in {$WEBUI_NAME}
+				{$i18n.t('What’s New in')}
+				{$WEBUI_NAME}
 				<Confetti x={[-1, -0.25]} y={[0, 0.5]} />
 			</div>
 			<button
@@ -45,7 +48,7 @@
 			</button>
 		</div>
 		<div class="flex items-center mt-1">
-			<div class="text-sm dark:text-gray-200">Release Notes</div>
+			<div class="text-sm dark:text-gray-200">{$i18n.t('Release Notes')}</div>
 			<div class="flex self-center w-[1px] h-6 mx-2.5 bg-gray-200 dark:bg-gray-700" />
 			<div class="text-sm dark:text-gray-200">
 				v{WEBUI_VERSION}
@@ -108,7 +111,7 @@
 				}}
 				class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"
 			>
-				<span class="relative">Okay, Let's Go!</span>
+				<span class="relative">{$i18n.t("Okay, Let's Go!")}</span>
 			</button>
 		</div>
 	</div>

+ 9 - 7
src/lib/components/admin/EditUserModal.svelte

@@ -2,11 +2,12 @@
 	import { toast } from 'svelte-sonner';
 	import dayjs from 'dayjs';
 	import { createEventDispatcher } from 'svelte';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 
 	import { updateUserById } from '$lib/apis/users';
 	import Modal from '../common/Modal.svelte';
 
+	const i18n = getContext('i18n');
 	const dispatch = createEventDispatcher();
 
 	export let show = false;
@@ -42,7 +43,7 @@
 <Modal size="sm" bind:show>
 	<div>
 		<div class=" flex justify-between dark:text-gray-300 px-5 py-4">
-			<div class=" text-lg font-medium self-center">Edit User</div>
+			<div class=" text-lg font-medium self-center">{$i18n.t('Edit User')}</div>
 			<button
 				class="self-center"
 				on:click={() => {
@@ -84,7 +85,8 @@
 							<div class=" self-center capitalize font-semibold">{selectedUser.name}</div>
 
 							<div class="text-xs text-gray-500">
-								Created at {dayjs(selectedUser.timestamp * 1000).format('MMMM DD, YYYY')}
+								{$i18n.t('Created at')}
+								{dayjs(selectedUser.timestamp * 1000).format($i18n.t('MMMM DD, YYYY'))}
 							</div>
 						</div>
 					</div>
@@ -93,7 +95,7 @@
 
 					<div class=" flex flex-col space-y-1.5">
 						<div class="flex flex-col w-full">
-							<div class=" mb-1 text-xs text-gray-500">Email</div>
+							<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Email')}</div>
 
 							<div class="flex-1">
 								<input
@@ -108,7 +110,7 @@
 						</div>
 
 						<div class="flex flex-col w-full">
-							<div class=" mb-1 text-xs text-gray-500">Name</div>
+							<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Name')}</div>
 
 							<div class="flex-1">
 								<input
@@ -122,7 +124,7 @@
 						</div>
 
 						<div class="flex flex-col w-full">
-							<div class=" mb-1 text-xs text-gray-500">New Password</div>
+							<div class=" mb-1 text-xs text-gray-500">{$i18n.t('New Password')}</div>
 
 							<div class="flex-1">
 								<input
@@ -140,7 +142,7 @@
 							class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"
 							type="submit"
 						>
-							Save
+							{$i18n.t('Save')}
 						</button>
 					</div>
 				</form>

+ 6 - 4
src/lib/components/admin/Settings/Database.svelte

@@ -1,6 +1,8 @@
 <script lang="ts">
 	import { downloadDatabase } from '$lib/apis/utils';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
+
+	const i18n = getContext('i18n');
 
 	export let saveHandler: Function;
 
@@ -17,10 +19,10 @@
 >
 	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
 		<div>
-			<div class=" mb-2 text-sm font-medium">Database</div>
+			<div class=" mb-2 text-sm font-medium">{$i18n.t('Database')}</div>
 
 			<div class="  flex w-full justify-between">
-				<!-- <div class=" self-center text-xs font-medium">Allow Chat Deletion</div> -->
+				<!-- <div class=" self-center text-xs font-medium">{$i18n.t('Allow Chat Deletion')}</div> -->
 
 				<button
 					class=" flex rounded-md py-1.5 px-3 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
@@ -46,7 +48,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center text-sm font-medium">Download Database</div>
+					<div class=" self-center text-sm font-medium">{$i18n.t('Download Database')}</div>
 				</button>
 			</div>
 		</div>

+ 43 - 15
src/lib/components/admin/Settings/General.svelte

@@ -1,4 +1,5 @@
 <script lang="ts">
+	import { getWebhookUrl, updateWebhookUrl } from '$lib/apis';
 	import {
 		getDefaultUserRole,
 		getJWTExpiresDuration,
@@ -7,13 +8,17 @@
 		updateDefaultUserRole,
 		updateJWTExpiresDuration
 	} from '$lib/apis/auths';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
+
+	const i18n = getContext('i18n');
 
 	export let saveHandler: Function;
 	let signUpEnabled = true;
 	let defaultUserRole = 'pending';
 	let JWTExpiresIn = '';
 
+	let webhookUrl = '';
+
 	const toggleSignUpEnabled = async () => {
 		signUpEnabled = await toggleSignUpEnabledStatus(localStorage.token);
 	};
@@ -26,27 +31,32 @@
 		JWTExpiresIn = await updateJWTExpiresDuration(localStorage.token, duration);
 	};
 
+	const updateWebhookUrlHandler = async () => {
+		webhookUrl = await updateWebhookUrl(localStorage.token, webhookUrl);
+	};
+
 	onMount(async () => {
 		signUpEnabled = await getSignUpEnabledStatus(localStorage.token);
 		defaultUserRole = await getDefaultUserRole(localStorage.token);
 		JWTExpiresIn = await getJWTExpiresDuration(localStorage.token);
+		webhookUrl = await getWebhookUrl(localStorage.token);
 	});
 </script>
 
 <form
 	class="flex flex-col h-full justify-between space-y-3 text-sm"
 	on:submit|preventDefault={() => {
-		// console.log('submit');
 		updateJWTExpiresDurationHandler(JWTExpiresIn);
+		updateWebhookUrlHandler();
 		saveHandler();
 	}}
 >
 	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
 		<div>
-			<div class=" mb-2 text-sm font-medium">General Settings</div>
+			<div class=" mb-2 text-sm font-medium">{$i18n.t('General Settings')}</div>
 
 			<div class="  flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Enable New Sign Ups</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Enable New Sign Ups')}</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -66,7 +76,7 @@
 								d="M11.5 1A3.5 3.5 0 0 0 8 4.5V7H2.5A1.5 1.5 0 0 0 1 8.5v5A1.5 1.5 0 0 0 2.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 9.5 7V4.5a2 2 0 1 1 4 0v1.75a.75.75 0 0 0 1.5 0V4.5A3.5 3.5 0 0 0 11.5 1Z"
 							/>
 						</svg>
-						<span class="ml-2 self-center">Enabled</span>
+						<span class="ml-2 self-center">{$i18n.t('Enabled')}</span>
 					{:else}
 						<svg
 							xmlns="http://www.w3.org/2000/svg"
@@ -81,25 +91,25 @@
 							/>
 						</svg>
 
-						<span class="ml-2 self-center">Disabled</span>
+						<span class="ml-2 self-center">{$i18n.t('Disabled')}</span>
 					{/if}
 				</button>
 			</div>
 
 			<div class=" flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Default User Role</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Default User Role')}</div>
 				<div class="flex items-center relative">
 					<select
-						class="w-fit pr-8 rounded py-2 px-2 text-xs bg-transparent outline-none text-right"
+						class="dark:bg-gray-900 w-fit pr-8 rounded py-2 px-2 text-xs bg-transparent outline-none text-right"
 						bind:value={defaultUserRole}
 						placeholder="Select a theme"
 						on:change={(e) => {
 							updateDefaultUserRoleHandler(e.target.value);
 						}}
 					>
-						<option value="pending">Pending</option>
-						<option value="user">User</option>
-						<option value="admin">Admin</option>
+						<option value="pending">{$i18n.t('pending')}</option>
+						<option value="user">{$i18n.t('user')}</option>
+						<option value="admin">{$i18n.t('admin')}</option>
 					</select>
 				</div>
 			</div>
@@ -108,7 +118,24 @@
 
 			<div class=" w-full justify-between">
 				<div class="flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">JWT Expiration</div>
+					<div class=" self-center text-xs font-medium">{$i18n.t('Webhook URL')}</div>
+				</div>
+
+				<div class="flex mt-2 space-x-2">
+					<input
+						class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
+						type="text"
+						placeholder={`https://example.com/webhook`}
+						bind:value={webhookUrl}
+					/>
+				</div>
+			</div>
+
+			<hr class=" dark:border-gray-700 my-3" />
+
+			<div class=" w-full justify-between">
+				<div class="flex w-full justify-between">
+					<div class=" self-center text-xs font-medium">{$i18n.t('JWT Expiration')}</div>
 				</div>
 
 				<div class="flex mt-2 space-x-2">
@@ -121,8 +148,9 @@
 				</div>
 
 				<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
-					Valid time units: <span class=" text-gray-300 font-medium"
-						>'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.</span
+					{$i18n.t('Valid time units:')}
+					<span class=" text-gray-300 font-medium"
+						>{$i18n.t("'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.")}</span
 					>
 				</div>
 			</div>
@@ -134,7 +162,7 @@
 			class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"
 			type="submit"
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </form>

+ 15 - 11
src/lib/components/admin/Settings/Users.svelte

@@ -2,8 +2,11 @@
 	import { getModelFilterConfig, updateModelFilterConfig } from '$lib/apis';
 	import { getSignUpEnabledStatus, toggleSignUpEnabledStatus } from '$lib/apis/auths';
 	import { getUserPermissions, updateUserPermissions } from '$lib/apis/users';
+
+	import { onMount, getContext } from 'svelte';
 	import { models } from '$lib/stores';
-	import { onMount } from 'svelte';
+
+	const i18n = getContext('i18n');
 
 	export let saveHandler: Function;
 
@@ -39,10 +42,10 @@
 >
 	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
 		<div>
-			<div class=" mb-2 text-sm font-medium">User Permissions</div>
+			<div class=" mb-2 text-sm font-medium">{$i18n.t('User Permissions')}</div>
 
 			<div class="  flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Allow Chat Deletion</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Allow Chat Deletion')}</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -62,7 +65,7 @@
 								d="M11.5 1A3.5 3.5 0 0 0 8 4.5V7H2.5A1.5 1.5 0 0 0 1 8.5v5A1.5 1.5 0 0 0 2.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 9.5 7V4.5a2 2 0 1 1 4 0v1.75a.75.75 0 0 0 1.5 0V4.5A3.5 3.5 0 0 0 11.5 1Z"
 							/>
 						</svg>
-						<span class="ml-2 self-center">Allow</span>
+						<span class="ml-2 self-center">{$i18n.t('Allow')}</span>
 					{:else}
 						<svg
 							xmlns="http://www.w3.org/2000/svg"
@@ -77,7 +80,7 @@
 							/>
 						</svg>
 
-						<span class="ml-2 self-center">Don't Allow</span>
+						<span class="ml-2 self-center">{$i18n.t("Don't Allow")}</span>
 					{/if}
 				</button>
 			</div>
@@ -89,21 +92,21 @@
 			<div>
 				<div class="mb-2">
 					<div class="flex justify-between items-center text-xs">
-						<div class=" text-sm font-medium">Manage Models</div>
+						<div class=" text-sm font-medium">{$i18n.t('Manage Models')}</div>
 					</div>
 				</div>
 
 				<div class=" space-y-3">
 					<div>
 						<div class="flex justify-between items-center text-xs">
-							<div class=" text-xs font-medium">Model Whitelisting</div>
+							<div class=" text-xs font-medium">{$i18n.t('Model Whitelisting')}</div>
 
 							<button
 								class=" text-xs font-medium text-gray-500"
 								type="button"
 								on:click={() => {
 									whitelistEnabled = !whitelistEnabled;
-								}}>{whitelistEnabled ? 'On' : 'Off'}</button
+								}}>{whitelistEnabled ? $i18n.t('On') : $i18n.t('Off')}</button
 							>
 						</div>
 					</div>
@@ -119,7 +122,7 @@
 												bind:value={modelId}
 												placeholder="Select a model"
 											>
-												<option value="" disabled selected>Select a model</option>
+												<option value="" disabled selected>{$i18n.t('Select a model')}</option>
 												{#each $models.filter((model) => model.id) as model}
 													<option value={model.id} class="bg-gray-100 dark:bg-gray-700"
 														>{model.name}</option
@@ -174,7 +177,8 @@
 
 							<div class="flex justify-end items-center text-xs mt-1.5 text-right">
 								<div class=" text-xs font-medium">
-									{whitelistModels.length} Model(s) Whitelisted
+									{whitelistModels.length}
+									{$i18n.t('Model(s) Whitelisted')}
 								</div>
 							</div>
 						</div>
@@ -189,7 +193,7 @@
 			class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"
 			type="submit"
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </form>

+ 7 - 4
src/lib/components/admin/SettingsModal.svelte

@@ -1,10 +1,13 @@
 <script>
+	import { getContext } from 'svelte';
 	import Modal from '../common/Modal.svelte';
 	import Database from './Settings/Database.svelte';
 
 	import General from './Settings/General.svelte';
 	import Users from './Settings/Users.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let show = false;
 
 	let selectedTab = 'general';
@@ -13,7 +16,7 @@
 <Modal bind:show>
 	<div>
 		<div class=" flex justify-between dark:text-gray-300 px-5 py-4">
-			<div class=" text-lg font-medium self-center">Admin Settings</div>
+			<div class=" text-lg font-medium self-center">{$i18n.t('Admin Settings')}</div>
 			<button
 				class="self-center"
 				on:click={() => {
@@ -61,7 +64,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">General</div>
+					<div class=" self-center">{$i18n.t('General')}</div>
 				</button>
 
 				<button
@@ -85,7 +88,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">Users</div>
+					<div class=" self-center">{$i18n.t('Users')}</div>
 				</button>
 
 				<button
@@ -113,7 +116,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">Database</div>
+					<div class=" self-center">{$i18n.t('Database')}</div>
 				</button>
 			</div>
 			<div class="flex-1 md:min-h-[380px]">

+ 23 - 15
src/lib/components/chat/MessageInput.svelte

@@ -1,6 +1,6 @@
 <script lang="ts">
 	import { toast } from 'svelte-sonner';
-	import { onMount, tick } from 'svelte';
+	import { onMount, tick, getContext } from 'svelte';
 	import { settings } from '$lib/stores';
 	import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils';
 
@@ -14,6 +14,8 @@
 	import { transcribeAudio } from '$lib/apis/audio';
 	import Tooltip from '../common/Tooltip.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let submitPrompt: Function;
 	export let stopResponse: Function;
 
@@ -209,11 +211,11 @@
 					// Event triggered when an error occurs
 					speechRecognition.onerror = function (event) {
 						console.log(event);
-						toast.error(`Speech recognition error: ${event.error}`);
+						toast.error($i18n.t(`Speech recognition error: {{error}}`, { error: event.error }));
 						isRecording = false;
 					};
 				} else {
-					toast.error('SpeechRecognition API is not supported in this browser.');
+					toast.error($i18n.t('SpeechRecognition API is not supported in this browser.'));
 				}
 			}
 		}
@@ -333,12 +335,15 @@
 						uploadDoc(file);
 					} else {
 						toast.error(
-							`Unknown File Type '${file['type']}', but accepting and treating as plain text`
+							$i18n.t(
+								`Unknown File Type '{{file_type}}', but accepting and treating as plain text`,
+								{ file_type: file['type'] }
+							)
 						);
 						uploadDoc(file);
 					}
 				} else {
-					toast.error(`File not found.`);
+					toast.error($i18n.t(`File not found.`));
 				}
 			}
 
@@ -477,13 +482,16 @@
 								filesInputElement.value = '';
 							} else {
 								toast.error(
-									`Unknown File Type '${file['type']}', but accepting and treating as plain text`
+									$i18n.t(
+										`Unknown File Type '{{file_type}}', but accepting and treating as plain text`,
+										{ file_type: file['type'] }
+									)
 								);
 								uploadDoc(file);
 								filesInputElement.value = '';
 							}
 						} else {
-							toast.error(`File not found.`);
+							toast.error($i18n.t(`File not found.`));
 						}
 					}}
 				/>
@@ -570,7 +578,7 @@
 													{file.name}
 												</div>
 
-												<div class=" text-gray-500 text-sm">Document</div>
+												<div class=" text-gray-500 text-sm">{$i18n.t('Document')}</div>
 											</div>
 										</div>
 									{:else if file.type === 'collection'}
@@ -598,7 +606,7 @@
 													{file?.title ?? `#${file.name}`}
 												</div>
 
-												<div class=" text-gray-500 text-sm">Collection</div>
+												<div class=" text-gray-500 text-sm">{$i18n.t('Collection')}</div>
 											</div>
 										</div>
 									{/if}
@@ -632,7 +640,7 @@
 					<div class=" flex">
 						{#if fileUploadEnabled}
 							<div class=" self-end mb-2 ml-1">
-								<Tooltip content="Upload files">
+								<Tooltip content={$i18n.t('Upload files')}>
 									<button
 										class="bg-gray-50 hover:bg-gray-100 text-gray-800 dark:bg-gray-850 dark:text-white dark:hover:bg-gray-800 transition rounded-full p-1.5"
 										type="button"
@@ -664,8 +672,8 @@
 							placeholder={chatInputPlaceholder !== ''
 								? chatInputPlaceholder
 								: isRecording
-								? 'Listening...'
-								: 'Send a message'}
+								? $i18n.t('Listening...')
+								: $i18n.t('Send a Message')}
 							bind:value={prompt}
 							on:keypress={(e) => {
 								if (e.keyCode == 13 && !e.shiftKey) {
@@ -804,7 +812,7 @@
 
 						<div class="self-end mb-2 flex space-x-1 mr-1">
 							{#if messages.length == 0 || messages.at(-1).done == true}
-								<Tooltip content="Record voice">
+								<Tooltip content={$i18n.t('Record voice')}>
 									{#if speechRecognitionEnabled}
 										<button
 											id="voice-input-button"
@@ -873,7 +881,7 @@
 									{/if}
 								</Tooltip>
 
-								<Tooltip content="Send message">
+								<Tooltip content={$i18n.t('Send message')}>
 									<button
 										class="{prompt !== ''
 											? 'bg-black text-white hover:bg-gray-900 dark:bg-white dark:text-black dark:hover:bg-gray-100 '
@@ -919,7 +927,7 @@
 				</form>
 
 				<div class="mt-1.5 text-xs text-gray-500 text-center">
-					LLMs can make mistakes. Verify important information.
+					{$i18n.t('LLMs can make mistakes. Verify important information.')}
 				</div>
 			</div>
 		</div>

+ 8 - 4
src/lib/components/chat/MessageInput/Documents.svelte

@@ -3,9 +3,11 @@
 
 	import { documents } from '$lib/stores';
 	import { removeFirstHashWord, isValidHttpUrl } from '$lib/utils';
-	import { tick } from 'svelte';
+	import { tick, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 
+	const i18n = getContext('i18n');
+
 	export let prompt = '';
 
 	const dispatch = createEventDispatcher();
@@ -117,7 +119,7 @@
 									{doc?.title ?? `#${doc.name}`}
 								</div>
 
-								<div class=" text-xs text-gray-600 line-clamp-1">Collection</div>
+								<div class=" text-xs text-gray-600 line-clamp-1">{$i18n.t('Collection')}</div>
 							{:else}
 								<div class=" font-medium text-black line-clamp-1">
 									#{doc.name} ({doc.filename})
@@ -140,7 +142,9 @@
 									confirmSelectWeb(url);
 								} else {
 									toast.error(
-										'Oops! Looks like the URL is invalid. Please double-check and try again.'
+										$i18n.t(
+											'Oops! Looks like the URL is invalid. Please double-check and try again.'
+										)
 									);
 								}
 							}}
@@ -149,7 +153,7 @@
 								{prompt.split(' ')?.at(0)?.substring(1)}
 							</div>
 
-							<div class=" text-xs text-gray-600 line-clamp-1">Web</div>
+							<div class=" text-xs text-gray-600 line-clamp-1">{$i18n.t('Web')}</div>
 						</button>
 					{/if}
 				</div>

+ 7 - 3
src/lib/components/chat/MessageInput/Models.svelte

@@ -2,9 +2,11 @@
 	import { generatePrompt } from '$lib/apis/ollama';
 	import { models } from '$lib/stores';
 	import { splitStream } from '$lib/utils';
-	import { tick } from 'svelte';
+	import { tick, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 
+	const i18n = getContext('i18n');
+
 	export let prompt = '';
 	export let user = null;
 
@@ -41,7 +43,7 @@
 		user = JSON.parse(JSON.stringify(model.name));
 		await tick();
 
-		chatInputPlaceholder = `'${model.name}' is thinking...`;
+		chatInputPlaceholder = $i18n.t('{{modelName}} is thinking...', { modelName: model.name });
 
 		const chatInputElement = document.getElementById('chat-textarea');
 
@@ -113,7 +115,9 @@
 					toast.error(error.error);
 				}
 			} else {
-				toast.error(`Uh-oh! There was an issue connecting to Ollama.`);
+				toast.error(
+					$i18n.t('Uh-oh! There was an issue connecting to {{provider}}.', { provider: 'Ollama' })
+				);
 			}
 		}
 

+ 7 - 4
src/lib/components/chat/MessageInput/PromptCommands.svelte

@@ -1,9 +1,11 @@
 <script lang="ts">
 	import { prompts } from '$lib/stores';
 	import { findWordIndices } from '$lib/utils';
-	import { tick } from 'svelte';
+	import { tick, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 
+	const i18n = getContext('i18n');
+
 	export let prompt = '';
 	let selectedCommandIdx = 0;
 	let filteredPromptCommands = [];
@@ -29,7 +31,7 @@
 
 		if (command.content.includes('{{CLIPBOARD}}')) {
 			const clipboardText = await navigator.clipboard.readText().catch((err) => {
-				toast.error('Failed to read clipboard contents');
+				toast.error($i18n.t('Failed to read clipboard contents'));
 				return '{{CLIPBOARD}}';
 			});
 
@@ -113,8 +115,9 @@
 					</div>
 
 					<div class="line-clamp-1">
-						Tip: Update multiple variable slots consecutively by pressing the tab key in the chat
-						input after each replacement.
+						{$i18n.t(
+							'Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.'
+						)}
 					</div>
 				</div>
 			</div>

+ 4 - 2
src/lib/components/chat/Messages.svelte

@@ -2,7 +2,7 @@
 	import { v4 as uuidv4 } from 'uuid';
 
 	import { chats, config, modelfiles, settings, user } from '$lib/stores';
-	import { tick } from 'svelte';
+	import { tick, getContext } from 'svelte';
 
 	import { toast } from 'svelte-sonner';
 	import { getChatList, updateChatById } from '$lib/apis/chats';
@@ -13,6 +13,8 @@
 	import Spinner from '../common/Spinner.svelte';
 	import { imageGenerations } from '$lib/apis/images';
 
+	const i18n = getContext('i18n');
+
 	export let chatId = '';
 	export let sendPrompt: Function;
 	export let continueGeneration: Function;
@@ -67,7 +69,7 @@
 		navigator.clipboard.writeText(text).then(
 			function () {
 				console.log('Async: Copying to clipboard was successful!');
-				toast.success('Copying to clipboard was successful!');
+				toast.success($i18n.t('Copying to clipboard was successful!'));
 			},
 			function (err) {
 				console.error('Async: Could not copy text: ', err);

+ 7 - 5
src/lib/components/chat/Messages/Placeholder.svelte

@@ -1,7 +1,9 @@
 <script lang="ts">
 	import { WEBUI_BASE_URL } from '$lib/constants';
 	import { user } from '$lib/stores';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
+
+	const i18n = getContext('i18n');
 
 	export let models = [];
 	export let modelfiles = [];
@@ -31,7 +33,7 @@
 							<img
 								src={modelfiles[model]?.imageUrl ?? `${WEBUI_BASE_URL}/static/favicon.png`}
 								alt="modelfile"
-								class=" w-14 rounded-full border-[1px] border-gray-200 dark:border-none"
+								class=" size-12 rounded-full border-[1px] border-gray-200 dark:border-none"
 								draggable="false"
 							/>
 						{:else}
@@ -39,7 +41,7 @@
 								src={models.length === 1
 									? `${WEBUI_BASE_URL}/static/favicon.png`
 									: `${WEBUI_BASE_URL}/static/favicon.png`}
-								class=" w-14 rounded-full border-[1px] border-gray-200 dark:border-none"
+								class=" size-12 rounded-full border-[1px] border-gray-200 dark:border-none"
 								alt="logo"
 								draggable="false"
 							/>
@@ -64,9 +66,9 @@
 					</div>
 				{/if}
 			{:else}
-				<div class=" line-clamp-1">Hello, {$user.name}</div>
+				<div class=" line-clamp-1">{$i18n.t('Hello, {{name}}', { name: $user.name })}</div>
 
-				<div>How can I help you today?</div>
+				<div>{$i18n.t('How can I help you today?')}</div>
 			{/if}
 		</div>
 	</div>

+ 7 - 5
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -8,7 +8,9 @@
 
 	import { fade } from 'svelte/transition';
 	import { createEventDispatcher } from 'svelte';
-	import { onMount, tick } from 'svelte';
+	import { onMount, tick, getContext } from 'svelte';
+
+	const i18n = getContext('i18n');
 
 	const dispatch = createEventDispatcher();
 
@@ -316,7 +318,7 @@
 
 				{#if message.timestamp}
 					<span class=" invisible group-hover:visible text-gray-400 text-xs font-medium">
-						{dayjs(message.timestamp * 1000).format('DD/MM/YYYY HH:mm')}
+						{dayjs(message.timestamp * 1000).format($i18n.t('DD/MM/YYYY HH:mm'))}
 					</span>
 				{/if}
 			</Name>
@@ -360,7 +362,7 @@
 											editMessageConfirmHandler();
 										}}
 									>
-										Save
+										{$i18n.t('Save')}
 									</button>
 
 									<button
@@ -369,7 +371,7 @@
 											cancelEditMessage();
 										}}
 									>
-										Cancel
+										{$i18n.t('Cancel')}
 									</button>
 								</div>
 							</div>
@@ -420,7 +422,7 @@
 										class=" flex justify-start space-x-1 overflow-x-auto buttons text-gray-700 dark:text-gray-500"
 									>
 										{#if siblings.length > 1}
-											<div class="flex self-center min-w-fit">
+											<div class="flex self-center min-w-fit -mt-1">
 												<button
 													class="self-center dark:hover:text-white hover:text-black transition"
 													on:click={() => {

+ 12 - 9
src/lib/components/chat/Messages/UserMessage.svelte

@@ -1,12 +1,14 @@
 <script lang="ts">
 	import dayjs from 'dayjs';
 
-	import { tick, createEventDispatcher } from 'svelte';
+	import { tick, createEventDispatcher, getContext } from 'svelte';
 	import Name from './Name.svelte';
 	import ProfileImage from './ProfileImage.svelte';
 	import { modelfiles, settings } from '$lib/stores';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 
+	const i18n = getContext('i18n');
+
 	const dispatch = createEventDispatcher();
 
 	export let user;
@@ -65,17 +67,18 @@
 					{#if $modelfiles.map((modelfile) => modelfile.tagName).includes(message.user)}
 						{$modelfiles.find((modelfile) => modelfile.tagName === message.user)?.title}
 					{:else}
-						You <span class=" text-gray-500 text-sm font-medium">{message?.user ?? ''}</span>
+						{$i18n.t('You')}
+						<span class=" text-gray-500 text-sm font-medium">{message?.user ?? ''}</span>
 					{/if}
 				{:else if $settings.showUsername}
 					{user.name}
 				{:else}
-					You
+					{$i18n.t('You')}
 				{/if}
 
 				{#if message.timestamp}
 					<span class=" invisible group-hover:visible text-gray-400 text-xs font-medium">
-						{dayjs(message.timestamp * 1000).format('DD/MM/YYYY HH:mm')}
+						{dayjs(message.timestamp * 1000).format($i18n.t('DD/MM/YYYY HH:mm'))}
 					</span>
 				{/if}
 			</Name>
@@ -123,7 +126,7 @@
 											{file.name}
 										</div>
 
-										<div class=" text-gray-500 text-sm">Document</div>
+										<div class=" text-gray-500 text-sm">{$i18n.t('Document')}</div>
 									</div>
 								</button>
 							{:else if file.type === 'collection'}
@@ -152,7 +155,7 @@
 											{file?.title ?? `#${file.name}`}
 										</div>
 
-										<div class=" text-gray-500 text-sm">Collection</div>
+										<div class=" text-gray-500 text-sm">{$i18n.t('Collection')}</div>
 									</div>
 								</button>
 							{/if}
@@ -181,7 +184,7 @@
 								editMessageConfirmHandler();
 							}}
 						>
-							Save & Submit
+							{$i18n.t('Save & Submit')}
 						</button>
 
 						<button
@@ -190,7 +193,7 @@
 								cancelEditMessage();
 							}}
 						>
-							Cancel
+							{$i18n.t('Cancel')}
 						</button>
 					</div>
 				</div>
@@ -200,7 +203,7 @@
 
 					<div class=" flex justify-start space-x-1 text-gray-700 dark:text-gray-500">
 						{#if siblings.length > 1}
-							<div class="flex self-center">
+							<div class="flex self-center -mt-1">
 								<button
 									class="self-center dark:hover:text-white hover:text-black transition"
 									on:click={() => {

+ 72 - 98
src/lib/components/chat/ModelSelector.svelte

@@ -1,8 +1,14 @@
 <script lang="ts">
+	import { Collapsible } from 'bits-ui';
+
 	import { setDefaultModels } from '$lib/apis/configs';
 	import { models, showSettings, settings, user } from '$lib/stores';
-	import { onMount, tick } from 'svelte';
+	import { onMount, tick, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
+	import Selector from './ModelSelector/Selector.svelte';
+	import Tooltip from '../common/Tooltip.svelte';
+
+	const i18n = getContext('i18n');
 
 	export let selectedModels = [''];
 	export let disabled = false;
@@ -10,7 +16,7 @@
 	const saveDefaultModel = async () => {
 		const hasEmptyModel = selectedModels.filter((it) => it === '');
 		if (hasEmptyModel.length) {
-			toast.error('Choose a model before saving...');
+			toast.error($i18n.t('Choose a model before saving...'));
 			return;
 		}
 		settings.set({ ...$settings, models: selectedModels });
@@ -20,7 +26,7 @@
 			console.log('setting default models globally');
 			await setDefaultModels(localStorage.token, selectedModels.join(','));
 		}
-		toast.success('Default model updated');
+		toast.success($i18n.t('Default model updated'));
 	};
 
 	$: if (selectedModels.length > 0 && $models.length > 0) {
@@ -30,108 +36,76 @@
 	}
 </script>
 
-<div class="flex flex-col my-2">
+<div class="flex flex-col mt-0.5 w-full">
 	{#each selectedModels as selectedModel, selectedModelIdx}
-		<div class="flex">
-			<select
-				id="models"
-				class="outline-none bg-transparent text-lg font-semibold rounded-lg block w-full placeholder-gray-400"
-				bind:value={selectedModel}
-				{disabled}
-			>
-				<option class=" text-gray-700" value="" selected disabled>Select a model</option>
-
-				{#each $models as model}
-					{#if model.name === 'hr'}
-						<hr />
-					{:else}
-						<option value={model.id} class="text-gray-700 text-lg"
-							>{model.name +
-								`${model.size ? ` (${(model.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}</option
-						>
-					{/if}
-				{/each}
-			</select>
+		<div class="flex w-full">
+			<div class="overflow-hidden w-full">
+				<div class="mr-0.5 max-w-full">
+					<Selector
+						placeholder={$i18n.t('Select a model')}
+						items={$models
+							.filter((model) => model.name !== 'hr')
+							.map((model) => ({
+								value: model.id,
+								label: model.name,
+								info: model
+							}))}
+						bind:value={selectedModel}
+					/>
+				</div>
+			</div>
 
 			{#if selectedModelIdx === 0}
-				<button
-					class="  self-center {selectedModelIdx === 0
-						? 'mr-3'
-						: 'mr-7'} disabled:text-gray-600 disabled:hover:text-gray-600"
-					{disabled}
-					on:click={() => {
-						selectedModels = [...selectedModels, ''];
-					}}
-				>
-					<svg
-						xmlns="http://www.w3.org/2000/svg"
-						fill="none"
-						viewBox="0 0 24 24"
-						stroke-width="1.5"
-						stroke="currentColor"
-						class="w-4 h-4"
-					>
-						<path stroke-linecap="round" stroke-linejoin="round" d="M12 6v12m6-6H6" />
-					</svg>
-				</button>
+				<div class="  self-center mr-2 disabled:text-gray-600 disabled:hover:text-gray-600">
+					<Tooltip content="Add Model">
+						<button
+							class=" "
+							{disabled}
+							on:click={() => {
+								selectedModels = [...selectedModels, ''];
+							}}
+						>
+							<svg
+								xmlns="http://www.w3.org/2000/svg"
+								fill="none"
+								viewBox="0 0 24 24"
+								stroke-width="1.5"
+								stroke="currentColor"
+								class="w-4 h-4"
+							>
+								<path stroke-linecap="round" stroke-linejoin="round" d="M12 6v12m6-6H6" />
+							</svg>
+						</button>
+					</Tooltip>
+				</div>
 			{:else}
-				<button
-					class="  self-center disabled:text-gray-600 disabled:hover:text-gray-600 {selectedModelIdx ===
-					0
-						? 'mr-3'
-						: 'mr-7'}"
-					{disabled}
-					on:click={() => {
-						selectedModels.splice(selectedModelIdx, 1);
-						selectedModels = selectedModels;
-					}}
-				>
-					<svg
-						xmlns="http://www.w3.org/2000/svg"
-						fill="none"
-						viewBox="0 0 24 24"
-						stroke-width="1.5"
-						stroke="currentColor"
-						class="w-4 h-4"
-					>
-						<path stroke-linecap="round" stroke-linejoin="round" d="M19.5 12h-15" />
-					</svg>
-				</button>
-			{/if}
-
-			{#if selectedModelIdx === 0}
-				<button
-					class=" self-center dark:hover:text-gray-300"
-					id="open-settings-button"
-					on:click={async () => {
-						await showSettings.set(!$showSettings);
-					}}
-				>
-					<svg
-						xmlns="http://www.w3.org/2000/svg"
-						fill="none"
-						viewBox="0 0 24 24"
-						stroke-width="1.5"
-						stroke="currentColor"
-						class="w-4 h-4"
-					>
-						<path
-							stroke-linecap="round"
-							stroke-linejoin="round"
-							d="M10.343 3.94c.09-.542.56-.94 1.11-.94h1.093c.55 0 1.02.398 1.11.94l.149.894c.07.424.384.764.78.93.398.164.855.142 1.205-.108l.737-.527a1.125 1.125 0 011.45.12l.773.774c.39.389.44 1.002.12 1.45l-.527.737c-.25.35-.272.806-.107 1.204.165.397.505.71.93.78l.893.15c.543.09.94.56.94 1.109v1.094c0 .55-.397 1.02-.94 1.11l-.893.149c-.425.07-.765.383-.93.78-.165.398-.143.854.107 1.204l.527.738c.32.447.269 1.06-.12 1.45l-.774.773a1.125 1.125 0 01-1.449.12l-.738-.527c-.35-.25-.806-.272-1.203-.107-.397.165-.71.505-.781.929l-.149.894c-.09.542-.56.94-1.11.94h-1.094c-.55 0-1.019-.398-1.11-.94l-.148-.894c-.071-.424-.384-.764-.781-.93-.398-.164-.854-.142-1.204.108l-.738.527c-.447.32-1.06.269-1.45-.12l-.773-.774a1.125 1.125 0 01-.12-1.45l.527-.737c.25-.35.273-.806.108-1.204-.165-.397-.505-.71-.93-.78l-.894-.15c-.542-.09-.94-.56-.94-1.109v-1.094c0-.55.398-1.02.94-1.11l.894-.149c.424-.07.765-.383.93-.78.165-.398.143-.854-.107-1.204l-.527-.738a1.125 1.125 0 01.12-1.45l.773-.773a1.125 1.125 0 011.45-.12l.737.527c.35.25.807.272 1.204.107.397-.165.71-.505.78-.929l.15-.894z"
-						/>
-						<path
-							stroke-linecap="round"
-							stroke-linejoin="round"
-							d="M15 12a3 3 0 11-6 0 3 3 0 016 0z"
-						/>
-					</svg>
-				</button>
+				<div class="  self-center disabled:text-gray-600 disabled:hover:text-gray-600 mr-2">
+					<Tooltip content="Remove Model">
+						<button
+							{disabled}
+							on:click={() => {
+								selectedModels.splice(selectedModelIdx, 1);
+								selectedModels = selectedModels;
+							}}
+						>
+							<svg
+								xmlns="http://www.w3.org/2000/svg"
+								fill="none"
+								viewBox="0 0 24 24"
+								stroke-width="1.5"
+								stroke="currentColor"
+								class="w-4 h-4"
+							>
+								<path stroke-linecap="round" stroke-linejoin="round" d="M19.5 12h-15" />
+							</svg>
+						</button>
+					</Tooltip>
+				</div>
 			{/if}
 		</div>
 	{/each}
 </div>
 
-<div class="text-left mt-1.5 text-xs text-gray-500">
-	<button on:click={saveDefaultModel}> Set as default</button>
+<div class="text-left mt-0.5 ml-1 text-[0.7rem] text-gray-500">
+	<button on:click={saveDefaultModel}> {$i18n.t('Set as default')}</button>
 </div>

+ 389 - 0
src/lib/components/chat/ModelSelector/Selector.svelte

@@ -0,0 +1,389 @@
+<script lang="ts">
+	import { Select } from 'bits-ui';
+
+	import { flyAndScale } from '$lib/utils/transitions';
+	import { createEventDispatcher, onMount, getContext, tick } from 'svelte';
+
+	import ChevronDown from '$lib/components/icons/ChevronDown.svelte';
+	import Check from '$lib/components/icons/Check.svelte';
+	import Search from '$lib/components/icons/Search.svelte';
+
+	import { cancelOllamaRequest, deleteModel, getOllamaVersion, pullModel } from '$lib/apis/ollama';
+
+	import { user, MODEL_DOWNLOAD_POOL, models } from '$lib/stores';
+	import { toast } from 'svelte-sonner';
+	import { capitalizeFirstLetter, getModels, splitStream } from '$lib/utils';
+	import Tooltip from '$lib/components/common/Tooltip.svelte';
+
+	const i18n = getContext('i18n');
+	const dispatch = createEventDispatcher();
+
+	export let value = '';
+	export let placeholder = 'Select a model';
+	export let searchEnabled = true;
+	export let searchPlaceholder = 'Search a model';
+
+	export let items = [{ value: 'mango', label: 'Mango' }];
+
+	let searchValue = '';
+	let ollamaVersion = null;
+
+	$: filteredItems = searchValue
+		? items.filter((item) => item.value.includes(searchValue.toLowerCase()))
+		: items;
+
+	const pullModelHandler = async () => {
+		const sanitizedModelTag = searchValue.trim();
+
+		console.log($MODEL_DOWNLOAD_POOL);
+		if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag]) {
+			toast.error(
+				$i18n.t(`Model '{{modelTag}}' is already in queue for downloading.`, {
+					modelTag: sanitizedModelTag
+				})
+			);
+			return;
+		}
+		if (Object.keys($MODEL_DOWNLOAD_POOL).length === 3) {
+			toast.error(
+				$i18n.t('Maximum of 3 models can be downloaded simultaneously. Please try again later.')
+			);
+			return;
+		}
+
+		const res = await pullModel(localStorage.token, sanitizedModelTag, '0').catch((error) => {
+			toast.error(error);
+			return null;
+		});
+
+		if (res) {
+			const reader = res.body
+				.pipeThrough(new TextDecoderStream())
+				.pipeThrough(splitStream('\n'))
+				.getReader();
+
+			while (true) {
+				try {
+					const { value, done } = await reader.read();
+					if (done) break;
+
+					let lines = value.split('\n');
+
+					for (const line of lines) {
+						if (line !== '') {
+							let data = JSON.parse(line);
+							console.log(data);
+							if (data.error) {
+								throw data.error;
+							}
+							if (data.detail) {
+								throw data.detail;
+							}
+
+							if (data.id) {
+								MODEL_DOWNLOAD_POOL.set({
+									...$MODEL_DOWNLOAD_POOL,
+									[sanitizedModelTag]: {
+										...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+										requestId: data.id,
+										reader,
+										done: false
+									}
+								});
+								console.log(data);
+							}
+
+							if (data.status) {
+								if (data.digest) {
+									let downloadProgress = 0;
+									if (data.completed) {
+										downloadProgress = Math.round((data.completed / data.total) * 1000) / 10;
+									} else {
+										downloadProgress = 100;
+									}
+
+									MODEL_DOWNLOAD_POOL.set({
+										...$MODEL_DOWNLOAD_POOL,
+										[sanitizedModelTag]: {
+											...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+											pullProgress: downloadProgress,
+											digest: data.digest
+										}
+									});
+								} else {
+									toast.success(data.status);
+
+									MODEL_DOWNLOAD_POOL.set({
+										...$MODEL_DOWNLOAD_POOL,
+										[sanitizedModelTag]: {
+											...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+											done: data.status === 'success'
+										}
+									});
+								}
+							}
+						}
+					}
+				} catch (error) {
+					console.log(error);
+					if (typeof error !== 'string') {
+						error = error.message;
+					}
+
+					toast.error(error);
+					// opts.callback({ success: false, error, modelName: opts.modelName });
+				}
+			}
+
+			if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag].done) {
+				toast.success(
+					$i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, {
+						modelName: sanitizedModelTag
+					})
+				);
+
+				models.set(await getModels(localStorage.token));
+			} else {
+				toast.error('Download canceled');
+			}
+
+			delete $MODEL_DOWNLOAD_POOL[sanitizedModelTag];
+
+			MODEL_DOWNLOAD_POOL.set({
+				...$MODEL_DOWNLOAD_POOL
+			});
+		}
+	};
+
+	onMount(async () => {
+		ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
+	});
+
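+	// Cancel an in-flight pull: stop the local stream reader, abort the server-side request,
+	// drop the entry from the download pool, and delete the partially pulled model.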
+	const cancelModelPullHandler = async (model: string) => {
+		const { reader, requestId } = $MODEL_DOWNLOAD_POOL[model];
+		if (reader) {
+			await reader.cancel();
+
+			await cancelOllamaRequest(localStorage.token, requestId);
+			delete $MODEL_DOWNLOAD_POOL[model];
+			MODEL_DOWNLOAD_POOL.set({
+				...$MODEL_DOWNLOAD_POOL
+			});
+			await deleteModel(localStorage.token, model);
+			toast.success(`${model} download has been canceled`);
+		}
+	};
+</script>
+
+<Select.Root
+	{items}
+	onOpenChange={async () => {
+		searchValue = '';
+		window.setTimeout(() => document.getElementById('model-search-input')?.focus(), 0);
+	}}
+	selected={items.find((item) => item.value === value) ?? ''}
+	onSelectedChange={(selectedItem) => {
+		value = selectedItem.value;
+	}}
+>
+	<Select.Trigger class="relative w-full" aria-label={placeholder}>
+		<Select.Value
+			class="flex text-left px-0.5 outline-none bg-transparent truncate text-lg font-semibold placeholder-gray-400  focus:outline-none"
+			{placeholder}
+		/>
+		<ChevronDown className="absolute end-2 top-1/2 -translate-y-[45%] size-3.5" strokeWidth="2.5" />
+	</Select.Trigger>
+	<Select.Content
+		class=" z-40 w-full rounded-lg  bg-white dark:bg-gray-900 dark:text-white shadow-lg border border-gray-300/30 dark:border-gray-700/50  outline-none"
+		transition={flyAndScale}
+		sideOffset={4}
+	>
+		<slot>
+			{#if searchEnabled}
+				<div class="flex items-center gap-2.5 px-5 mt-3.5 mb-3">
+					<Search className="size-4" strokeWidth="2.5" />
+
+					<input
+						id="model-search-input"
+						bind:value={searchValue}
+						class="w-full text-sm bg-transparent outline-none"
+						placeholder={searchPlaceholder}
+					/>
+				</div>
+
+				<hr class="border-gray-100 dark:border-gray-800" />
+			{/if}
+
+			<div class="px-3 my-2 max-h-72 overflow-y-auto">
+				{#each filteredItems as item}
+					<Select.Item
+						class="flex w-full font-medium line-clamp-1 select-none items-center rounded-button py-2 pl-3 pr-1.5 text-sm  text-gray-700 dark:text-gray-100  outline-none transition-all duration-75 hover:bg-gray-100 dark:hover:bg-gray-850 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
+						value={item.value}
+						label={item.label}
+					>
+						<div class="flex items-center gap-2">
+							<div class="line-clamp-1">
+								{item.label}
+
+								<span class=" text-xs font-medium text-gray-600 dark:text-gray-400"
+									>{item.info?.details?.parameter_size ?? ''}</span
+								>
+							</div>
+
+							<!-- {JSON.stringify(item.info)} -->
+
+							{#if item.info?.external}
+								<Tooltip content={item.info?.source ?? 'External'}>
+									<div class=" mr-2">
+										<svg
+											xmlns="http://www.w3.org/2000/svg"
+											viewBox="0 0 16 16"
+											fill="currentColor"
+											class="size-3"
+										>
+											<path
+												fill-rule="evenodd"
+												d="M8.914 6.025a.75.75 0 0 1 1.06 0 3.5 3.5 0 0 1 0 4.95l-2 2a3.5 3.5 0 0 1-5.396-4.402.75.75 0 0 1 1.251.827 2 2 0 0 0 3.085 2.514l2-2a2 2 0 0 0 0-2.828.75.75 0 0 1 0-1.06Z"
+												clip-rule="evenodd"
+											/>
+											<path
+												fill-rule="evenodd"
+												d="M7.086 9.975a.75.75 0 0 1-1.06 0 3.5 3.5 0 0 1 0-4.95l2-2a3.5 3.5 0 0 1 5.396 4.402.75.75 0 0 1-1.251-.827 2 2 0 0 0-3.085-2.514l-2 2a2 2 0 0 0 0 2.828.75.75 0 0 1 0 1.06Z"
+												clip-rule="evenodd"
+											/>
+										</svg>
+									</div>
+								</Tooltip>
+							{:else}
+								<Tooltip
+									content={`${
+										item.info?.details?.quantization_level
+											? item.info?.details?.quantization_level + ' '
+											: ''
+									}${item.info?.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}
+								>
+									<div class=" mr-2">
+										<svg
+											xmlns="http://www.w3.org/2000/svg"
+											fill="none"
+											viewBox="0 0 24 24"
+											stroke-width="1.5"
+											stroke="currentColor"
+											class="w-4 h-4"
+										>
+											<path
+												stroke-linecap="round"
+												stroke-linejoin="round"
+												d="m11.25 11.25.041-.02a.75.75 0 0 1 1.063.852l-.708 2.836a.75.75 0 0 0 1.063.853l.041-.021M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Zm-9-3.75h.008v.008H12V8.25Z"
+											/>
+										</svg>
+									</div>
+								</Tooltip>
+							{/if}
+						</div>
+
+						{#if value === item.value}
+							<div class="ml-auto">
+								<Check />
+							</div>
+						{/if}
+					</Select.Item>
+				{:else}
+					<div>
+						<div class="block px-3 py-2 text-sm text-gray-700 dark:text-gray-100">
+							No results found
+						</div>
+					</div>
+				{/each}
+
+				{#if !(searchValue.trim() in $MODEL_DOWNLOAD_POOL) && searchValue && ollamaVersion && $user.role === 'admin'}
+					<button
+						class="flex w-full font-medium line-clamp-1 select-none items-center rounded-button py-2 pl-3 pr-1.5 text-sm text-gray-700 dark:text-gray-100 outline-none transition-all duration-75 hover:bg-gray-100 dark:hover:bg-gray-850 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
+						on:click={() => {
+							pullModelHandler();
+						}}
+					>
+						Pull "{searchValue}" from Ollama.com
+					</button>
+				{/if}
+
+				{#each Object.keys($MODEL_DOWNLOAD_POOL) as model}
+					<div
+						class="flex w-full justify-between font-medium select-none rounded-button py-2 pl-3 pr-1.5 text-sm text-gray-700 dark:text-gray-100 outline-none transition-all duration-75 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
+					>
+						<div class="flex">
+							<div class="-ml-2 mr-2.5 translate-y-0.5">
+								<svg
+									class="size-4"
+									viewBox="0 0 24 24"
+									fill="currentColor"
+									xmlns="http://www.w3.org/2000/svg"
+									><style>
+										.spinner_ajPY {
+											transform-origin: center;
+											animation: spinner_AtaB 0.75s infinite linear;
+										}
+										@keyframes spinner_AtaB {
+											100% {
+												transform: rotate(360deg);
+											}
+										}
+									</style><path
+										d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
+										opacity=".25"
+									/><path
+										d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
+										class="spinner_ajPY"
+									/></svg
+								>
+							</div>
+
+							<div class="flex flex-col self-start">
+								<div class="line-clamp-1">
+									Downloading "{model}" {'pullProgress' in $MODEL_DOWNLOAD_POOL[model]
+										? `(${$MODEL_DOWNLOAD_POOL[model].pullProgress}%)`
+										: ''}
+								</div>
+
+								{#if 'digest' in $MODEL_DOWNLOAD_POOL[model] && $MODEL_DOWNLOAD_POOL[model].digest}
+									<div class="-mt-1 h-fit text-[0.7rem] dark:text-gray-500 line-clamp-1">
+										{$MODEL_DOWNLOAD_POOL[model].digest}
+									</div>
+								{/if}
+							</div>
+						</div>
+
+						<div class="mr-2 translate-y-0.5">
+							<Tooltip content="Cancel">
+								<button
+									class="text-gray-800 dark:text-gray-100"
+									on:click={() => {
+										cancelModelPullHandler(model);
+									}}
+								>
+									<svg
+										class="w-4 h-4 text-gray-800 dark:text-white"
+										aria-hidden="true"
+										xmlns="http://www.w3.org/2000/svg"
+										width="24"
+										height="24"
+										fill="currentColor"
+										viewBox="0 0 24 24"
+									>
+										<path
+											stroke="currentColor"
+											stroke-linecap="round"
+											stroke-linejoin="round"
+											stroke-width="2"
+											d="M6 18 17.94 6M18 18 6.06 6"
+										/>
+									</svg>
+								</button>
+							</Tooltip>
+						</div>
+					</div>
+				{/each}
+			</div>
+		</slot>
+	</Select.Content>
+</Select.Root>

+ 13 - 9
src/lib/components/chat/Settings/About.svelte

@@ -4,7 +4,9 @@
 	import { WEBUI_VERSION } from '$lib/constants';
 	import { WEBUI_NAME, config, showChangelog } from '$lib/stores';
 	import { compareVersion } from '$lib/utils';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
+
+	const i18n = getContext('i18n');
 
 	let ollamaVersion = '';
 
@@ -43,7 +45,8 @@
 		<div>
 			<div class=" mb-2.5 text-sm font-medium flex space-x-2 items-center">
 				<div>
-					{$WEBUI_NAME} Version
+					{$WEBUI_NAME}
+					{$i18n.t('Version')}
 				</div>
 			</div>
 			<div class="flex w-full justify-between items-center">
@@ -56,10 +59,10 @@
 							target="_blank"
 						>
 							{updateAvailable === null
-								? 'Checking for updates...'
+								? $i18n.t('Checking for updates...')
 								: updateAvailable
-								? `(v${version.latest} available!)`
-								: '(latest)'}
+								? `(v${version.latest} ${$i18n.t('available!')})`
+								: $i18n.t('(latest)')}
 						</a>
 					</div>
 
@@ -69,7 +72,7 @@
 							showChangelog.set(true);
 						}}
 					>
-						<div>See what's new</div>
+						<div>{$i18n.t("See what's new")}</div>
 					</button>
 				</div>
 
@@ -79,7 +82,7 @@
 						checkForVersionUpdates();
 					}}
 				>
-					Check for updates
+					{$i18n.t('Check for updates')}
 				</button>
 			</div>
 		</div>
@@ -88,7 +91,7 @@
 			<hr class=" dark:border-gray-700" />
 
 			<div>
-				<div class=" mb-2.5 text-sm font-medium">Ollama Version</div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Ollama Version')}</div>
 				<div class="flex w-full">
 					<div class="flex-1 text-xs text-gray-700 dark:text-gray-200">
 						{ollamaVersion ?? 'N/A'}
@@ -123,7 +126,8 @@
 		</div>
 
 		<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
-			Created by <a
+			{$i18n.t('Created by')}
+			<a
 				class=" text-gray-500 dark:text-gray-300 font-medium"
 				href="https://github.com/tjbck"
 				target="_blank">Timothy J. Baek</a

+ 9 - 7
src/lib/components/chat/Settings/Account.svelte

@@ -1,6 +1,6 @@
 <script lang="ts">
 	import { toast } from 'svelte-sonner';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 
 	import { user } from '$lib/stores';
 	import { updateUserProfile } from '$lib/apis/auths';
@@ -9,6 +9,8 @@
 	import { getGravatarUrl } from '$lib/apis/utils';
 	import { copyToClipboard } from '$lib/utils';
 
+	const i18n = getContext('i18n');
+
 	export let saveHandler: Function;
 
 	let profileImageUrl = '';
@@ -38,7 +40,7 @@
 </script>
 
 <div class="flex flex-col h-full justify-between text-sm">
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[22rem]">
 		<input
 			id="profile-image-input"
 			bind:this={profileImageInputElement}
@@ -101,7 +103,7 @@
 			}}
 		/>
 
-		<div class=" mb-2.5 text-sm font-medium">Profile</div>
+		<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Profile')}</div>
 
 		<div class="flex space-x-5">
 			<div class="flex flex-col">
@@ -143,13 +145,13 @@
 						const url = await getGravatarUrl($user.email);
 
 						profileImageUrl = url;
-					}}>Use Gravatar</button
+					}}>{$i18n.t('Use Gravatar')}</button
 				>
 			</div>
 
 			<div class="flex-1">
 				<div class="flex flex-col w-full">
-					<div class=" mb-1 text-xs text-gray-500">Name</div>
+					<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Name')}</div>
 
 					<div class="flex-1">
 						<input
@@ -170,7 +172,7 @@
 
 		<div class=" w-full justify-between">
 			<div class="flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">JWT Token</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('JWT Token')}</div>
 			</div>
 
 			<div class="flex mt-2">
@@ -280,7 +282,7 @@
 				}
 			}}
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </div>

+ 10 - 7
src/lib/components/chat/Settings/Account/UpdatePassword.svelte

@@ -1,7 +1,10 @@
 <script lang="ts">
+	import { getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 	import { updateUserPassword } from '$lib/apis/auths';
 
+	const i18n = getContext('i18n');
+
 	let show = false;
 	let currentPassword = '';
 	let newPassword = '';
@@ -17,7 +20,7 @@
 			);
 
 			if (res) {
-				toast.success('Successfully updated.');
+				toast.success($i18n.t('Successfully updated.'));
 			}
 
 			currentPassword = '';
@@ -40,20 +43,20 @@
 	}}
 >
 	<div class="flex justify-between items-center text-sm">
-		<div class="  font-medium">Change Password</div>
+		<div class="  font-medium">{$i18n.t('Change Password')}</div>
 		<button
 			class=" text-xs font-medium text-gray-500"
 			type="button"
 			on:click={() => {
 				show = !show;
-			}}>{show ? 'Hide' : 'Show'}</button
+			}}>{show ? $i18n.t('Hide') : $i18n.t('Show')}</button
 		>
 	</div>
 
 	{#if show}
 		<div class=" py-2.5 space-y-1.5">
 			<div class="flex flex-col w-full">
-				<div class=" mb-1 text-xs text-gray-500">Current Password</div>
+				<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Current Password')}</div>
 
 				<div class="flex-1">
 					<input
@@ -67,7 +70,7 @@
 			</div>
 
 			<div class="flex flex-col w-full">
-				<div class=" mb-1 text-xs text-gray-500">New Password</div>
+				<div class=" mb-1 text-xs text-gray-500">{$i18n.t('New Password')}</div>
 
 				<div class="flex-1">
 					<input
@@ -81,7 +84,7 @@
 			</div>
 
 			<div class="flex flex-col w-full">
-				<div class=" mb-1 text-xs text-gray-500">Confirm Password</div>
+				<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Confirm Password')}</div>
 
 				<div class="flex-1">
 					<input
@@ -99,7 +102,7 @@
 			<button
 				class=" px-4 py-2 text-xs bg-gray-800 hover:bg-gray-900 dark:bg-gray-700 dark:hover:bg-gray-800 text-gray-100 transition rounded-md font-medium"
 			>
-				Update password
+				{$i18n.t('Update password')}
 			</button>
 		</div>
 	{/if}

+ 13 - 11
src/lib/components/chat/Settings/Advanced.svelte

@@ -1,8 +1,10 @@
 <script lang="ts">
-	import { createEventDispatcher, onMount } from 'svelte';
+	import { createEventDispatcher, onMount, getContext } from 'svelte';
+	import AdvancedParams from './Advanced/AdvancedParams.svelte';
+
+	const i18n = getContext('i18n');
 	const dispatch = createEventDispatcher();
 
-	import AdvancedParams from './Advanced/AdvancedParams.svelte';
 	export let saveSettings: Function;
 
 	// Advanced
@@ -55,14 +57,14 @@
 
 <div class="flex flex-col h-full justify-between text-sm">
 	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
-		<div class=" text-sm font-medium">Parameters</div>
+		<div class=" text-sm font-medium">{$i18n.t('Parameters')}</div>
 
 		<AdvancedParams bind:options />
 		<hr class=" dark:border-gray-700" />
 
 		<div class=" py-1 w-full justify-between">
 			<div class="flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Keep Alive</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Keep Alive')}</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -72,9 +74,9 @@
 					}}
 				>
 					{#if keepAlive === null}
-						<span class="ml-2 self-center"> Default </span>
+						<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 					{:else}
-						<span class="ml-2 self-center"> Custom </span>
+						<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 					{/if}
 				</button>
 			</div>
@@ -84,7 +86,7 @@
 					<input
 						class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
 						type="text"
-						placeholder={`e.g.) "30s","10m". Valid time units are "s", "m", "h".`}
+						placeholder={$i18n.t("e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.")}
 						bind:value={keepAlive}
 					/>
 				</div>
@@ -93,7 +95,7 @@
 
 		<div>
 			<div class=" py-1 flex w-full justify-between">
-				<div class=" self-center text-sm font-medium">Request Mode</div>
+				<div class=" self-center text-sm font-medium">{$i18n.t('Request Mode')}</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -102,7 +104,7 @@
 					}}
 				>
 					{#if requestFormat === ''}
-						<span class="ml-2 self-center"> Default </span>
+						<span class="ml-2 self-center"> {$i18n.t('Default')} </span>
 					{:else if requestFormat === 'json'}
 						<!-- <svg
                             xmlns="http://www.w3.org/2000/svg"
@@ -114,7 +116,7 @@
                                 d="M10 2a.75.75 0 01.75.75v1.5a.75.75 0 01-1.5 0v-1.5A.75.75 0 0110 2zM10 15a.75.75 0 01.75.75v1.5a.75.75 0 01-1.5 0v-1.5A.75.75 0 0110 15zM10 7a3 3 0 100 6 3 3 0 000-6zM15.657 5.404a.75.75 0 10-1.06-1.06l-1.061 1.06a.75.75 0 001.06 1.06l1.06-1.06zM6.464 14.596a.75.75 0 10-1.06-1.06l-1.06 1.06a.75.75 0 001.06 1.06l1.06-1.06zM18 10a.75.75 0 01-.75.75h-1.5a.75.75 0 010-1.5h1.5A.75.75 0 0118 10zM5 10a.75.75 0 01-.75.75h-1.5a.75.75 0 010-1.5h1.5A.75.75 0 015 10zM14.596 15.657a.75.75 0 001.06-1.06l-1.06-1.061a.75.75 0 10-1.06 1.06l1.06 1.06zM5.404 6.464a.75.75 0 001.06-1.06l-1.06-1.06a.75.75 0 10-1.061 1.06l1.06 1.06z"
                             />
                         </svg> -->
-						<span class="ml-2 self-center"> JSON </span>
+						<span class="ml-2 self-center">{$i18n.t('JSON')}</span>
 					{/if}
 				</button>
 			</div>
@@ -147,7 +149,7 @@
 				dispatch('save');
 			}}
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </div>

+ 40 - 36
src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte

@@ -1,4 +1,8 @@
 <script lang="ts">
+	import { getContext } from 'svelte';
+
+	const i18n = getContext('i18n');
+
 	export let options = {
 		// Advanced
 		seed: 0,
@@ -20,7 +24,7 @@
 <div class=" space-y-3 text-xs">
 	<div>
 		<div class=" py-0.5 flex w-full justify-between">
-			<div class=" w-20 text-xs font-medium self-center">Seed</div>
+			<div class=" w-20 text-xs font-medium self-center">{$i18n.t('Seed')}</div>
 			<div class=" flex-1 self-center">
 				<input
 					class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
@@ -36,12 +40,12 @@
 
 	<div>
 		<div class=" py-0.5 flex w-full justify-between">
-			<div class=" w-20 text-xs font-medium self-center">Stop Sequence</div>
+			<div class=" w-20 text-xs font-medium self-center">{$i18n.t('Stop Sequence')}</div>
 			<div class=" flex-1 self-center">
 				<input
 					class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
 					type="text"
-					placeholder="Enter Stop Sequence"
+					placeholder={$i18n.t('Enter stop sequence')}
 					bind:value={options.stop}
 					autocomplete="off"
 				/>
@@ -51,7 +55,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Temperature</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Temperature')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -61,9 +65,9 @@
 				}}
 			>
 				{#if options.temperature === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center"> {$i18n.t('Default')} </span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center"> {$i18n.t('Custom')} </span>
 				{/if}
 			</button>
 		</div>
@@ -97,7 +101,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Mirostat</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Mirostat')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -107,9 +111,9 @@
 				}}
 			>
 				{#if options.mirostat === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -143,7 +147,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Mirostat Eta</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Mirostat Eta')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -153,9 +157,9 @@
 				}}
 			>
 				{#if options.mirostat_eta === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -189,7 +193,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Mirostat Tau</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Mirostat Tau')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -199,9 +203,9 @@
 				}}
 			>
 				{#if options.mirostat_tau === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -235,7 +239,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Top K</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Top K')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -245,9 +249,9 @@
 				}}
 			>
 				{#if options.top_k === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -281,7 +285,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Top P</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Top P')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -291,9 +295,9 @@
 				}}
 			>
 				{#if options.top_p === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -327,7 +331,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Repeat Penalty</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Repeat Penalty')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -337,9 +341,9 @@
 				}}
 			>
 				{#if options.repeat_penalty === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -373,7 +377,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Repeat Last N</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Repeat Last N')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -383,9 +387,9 @@
 				}}
 			>
 				{#if options.repeat_last_n === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -419,7 +423,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Tfs Z</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Tfs Z')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -429,9 +433,9 @@
 				}}
 			>
 				{#if options.tfs_z === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -465,7 +469,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Context Length</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Context Length')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -475,9 +479,9 @@
 				}}
 			>
 				{#if options.num_ctx === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>
@@ -510,7 +514,7 @@
 	</div>
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
-			<div class=" self-center text-xs font-medium">Max Tokens</div>
+			<div class=" self-center text-xs font-medium">{$i18n.t('Max Tokens')}</div>
 
 			<button
 				class="p-1 px-3 text-xs flex rounded transition"
@@ -520,9 +524,9 @@
 				}}
 			>
 				{#if options.num_predict === ''}
-					<span class="ml-2 self-center"> Default </span>
+					<span class="ml-2 self-center">{$i18n.t('Default')}</span>
 				{:else}
-					<span class="ml-2 self-center"> Custom </span>
+					<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
 				{/if}
 			</button>
 		</div>

+ 33 - 25
src/lib/components/chat/Settings/Audio.svelte

@@ -1,8 +1,10 @@
 <script lang="ts">
-	import { createEventDispatcher, onMount } from 'svelte';
+	import { createEventDispatcher, onMount, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 	const dispatch = createEventDispatcher();
 
+	const i18n = getContext('i18n');
+
 	export let saveSettings: Function;
 
 	// Audio
@@ -101,32 +103,36 @@
 >
 	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
 		<div>
-			<div class=" mb-1 text-sm font-medium">STT Settings</div>
+			<div class=" mb-1 text-sm font-medium">{$i18n.t('STT Settings')}</div>
 
 			<div class=" py-0.5 flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Speech-to-Text Engine</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Speech-to-Text Engine')}</div>
 				<div class="flex items-center relative">
 					<select
-						class="w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
+						class="dark:bg-gray-900 w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
 						bind:value={STTEngine}
 						placeholder="Select a mode"
 						on:change={(e) => {
 							if (e.target.value !== '') {
 								navigator.mediaDevices.getUserMedia({ audio: true }).catch(function (err) {
-									toast.error(`Permission denied when accessing microphone: ${err}`);
+									toast.error(
+										$i18n.t(`Permission denied when accessing microphone: {{error}}`, {
+											error: err
+										})
+									);
 									STTEngine = '';
 								});
 							}
 						}}
 					>
-						<option value="">Default (Web API)</option>
-						<option value="whisper-local">Whisper (Local)</option>
+						<option value="">{$i18n.t('Default (Web API)')}</option>
+						<option value="whisper-local">{$i18n.t('Whisper (Local)')}</option>
 					</select>
 				</div>
 			</div>
 
 			<div class=" py-0.5 flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Conversation Mode</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Conversation Mode')}</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -136,15 +142,17 @@
 					type="button"
 				>
 					{#if conversationMode === true}
-						<span class="ml-2 self-center">On</span>
+						<span class="ml-2 self-center">{$i18n.t('On')}</span>
 					{:else}
-						<span class="ml-2 self-center">Off</span>
+						<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 					{/if}
 				</button>
 			</div>
 
 			<div class=" py-0.5 flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Auto-send input after 3 sec.</div>
+				<div class=" self-center text-xs font-medium">
+					{$i18n.t('Auto-send input after 3 sec.')}
+				</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -154,22 +162,22 @@
 					type="button"
 				>
 					{#if speechAutoSend === true}
-						<span class="ml-2 self-center">On</span>
+						<span class="ml-2 self-center">{$i18n.t('On')}</span>
 					{:else}
-						<span class="ml-2 self-center">Off</span>
+						<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 					{/if}
 				</button>
 			</div>
 		</div>
 
 		<div>
-			<div class=" mb-1 text-sm font-medium">TTS Settings</div>
+			<div class=" mb-1 text-sm font-medium">{$i18n.t('TTS Settings')}</div>
 
 			<div class=" py-0.5 flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Text-to-Speech Engine</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Text-to-Speech Engine')}</div>
 				<div class="flex items-center relative">
 					<select
-						class="w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
+						class=" dark:bg-gray-900 w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
 						bind:value={TTSEngine}
 						placeholder="Select a mode"
 						on:change={(e) => {
@@ -182,14 +190,14 @@
 							}
 						}}
 					>
-						<option value="">Default (Web API)</option>
-						<option value="openai">Open AI</option>
+						<option value="">{$i18n.t('Default (Web API)')}</option>
+						<option value="openai">{$i18n.t('Open AI')}</option>
 					</select>
 				</div>
 			</div>
 
 			<div class=" py-0.5 flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Auto-playback response</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Auto-playback response')}</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -199,9 +207,9 @@
 					type="button"
 				>
 					{#if responseAutoPlayback === true}
-						<span class="ml-2 self-center">On</span>
+						<span class="ml-2 self-center">{$i18n.t('On')}</span>
 					{:else}
-						<span class="ml-2 self-center">Off</span>
+						<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 					{/if}
 				</button>
 			</div>
@@ -211,7 +219,7 @@
 
 		{#if TTSEngine === ''}
 			<div>
-				<div class=" mb-2.5 text-sm font-medium">Set Voice</div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Voice')}</div>
 				<div class="flex w-full">
 					<div class="flex-1">
 						<select
@@ -219,7 +227,7 @@
 							bind:value={speaker}
 							placeholder="Select a voice"
 						>
-							<option value="" selected>Default</option>
+							<option value="" selected>{$i18n.t('Default')}</option>
 							{#each voices.filter((v) => v.localService === true) as voice}
 								<option value={voice.name} class="bg-gray-100 dark:bg-gray-700">{voice.name}</option
 								>
@@ -230,7 +238,7 @@
 			</div>
 		{:else if TTSEngine === 'openai'}
 			<div>
-				<div class=" mb-2.5 text-sm font-medium">Set Voice</div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Voice')}</div>
 				<div class="flex w-full">
 					<div class="flex-1">
 						<select
@@ -254,7 +262,7 @@
 			class=" px-4 py-2 bg-emerald-700 hover:bg-emerald-800 text-gray-100 transition rounded-lg"
 			type="submit"
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </form>

+ 14 - 43
src/lib/components/chat/Settings/Chats.svelte

@@ -2,7 +2,6 @@
 	import fileSaver from 'file-saver';
 	const { saveAs } = fileSaver;
 
-	import { resetVectorDB } from '$lib/apis/rag';
 	import { chats, user } from '$lib/stores';
 
 	import {
@@ -13,10 +12,12 @@
 		getChatList
 	} from '$lib/apis/chats';
 	import { getImportOrigin, convertOpenAIChats } from '$lib/utils';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 	import { goto } from '$app/navigation';
 	import { toast } from 'svelte-sonner';
 
+	const i18n = getContext('i18n');
+
 	export let saveSettings: Function;
 	// Chats
 	let saveChatHistory = true;
@@ -99,13 +100,13 @@
 	});
 </script>
 
-<div class="flex flex-col h-full justify-between space-y-3 text-sm">
+<div class="flex flex-col h-full justify-between space-y-3 text-sm max-h-[22rem]">
 	<div class=" space-y-2">
 		<div
 			class="flex flex-col justify-between rounded-md items-center py-2 px-3.5 w-full transition"
 		>
 			<div class="flex w-full justify-between">
-				<div class=" self-center text-sm font-medium">Chat History</div>
+				<div class=" self-center text-sm font-medium">{$i18n.t('Chat History')}</div>
 
 				<button
 					class="p-1 px-3 text-xs flex rounded transition"
@@ -129,7 +130,7 @@
 							/>
 						</svg>
 
-						<span class="ml-2 self-center"> On </span>
+						<span class="ml-2 self-center"> {$i18n.t('On')} </span>
 					{:else}
 						<svg
 							xmlns="http://www.w3.org/2000/svg"
@@ -147,13 +148,13 @@
 							/>
 						</svg>
 
-						<span class="ml-2 self-center">Off</span>
+						<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 					{/if}
 				</button>
 			</div>
 
 			<div class="text-xs text-left w-full font-medium mt-0.5">
-				This setting does not sync across browsers or devices.
+				{$i18n.t('This setting does not sync across browsers or devices.')}
 			</div>
 		</div>
 
@@ -188,7 +189,7 @@
 						/>
 					</svg>
 				</div>
-				<div class=" self-center text-sm font-medium">Import Chats</div>
+				<div class=" self-center text-sm font-medium">{$i18n.t('Import Chats')}</div>
 			</button>
 			<button
 				class=" flex rounded-md py-2 px-3.5 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
@@ -210,7 +211,7 @@
 						/>
 					</svg>
 				</div>
-				<div class=" self-center text-sm font-medium">Export Chats</div>
+				<div class=" self-center text-sm font-medium">{$i18n.t('Export Chats')}</div>
 			</button>
 		</div>
 
@@ -232,7 +233,7 @@
 							clip-rule="evenodd"
 						/>
 					</svg>
-					<span>Are you sure?</span>
+					<span>{$i18n.t('Are you sure?')}</span>
 				</div>
 
 				<div class="flex space-x-1.5 items-center">
@@ -296,7 +297,7 @@
 						/>
 					</svg>
 				</div>
-				<div class=" self-center text-sm font-medium">Delete Chats</div>
+				<div class=" self-center text-sm font-medium">{$i18n.t('Delete Chats')}</div>
 			</button>
 		{/if}
 
@@ -324,39 +325,9 @@
 						/>
 					</svg>
 				</div>
-				<div class=" self-center text-sm font-medium">Export All Chats (All Users)</div>
-			</button>
-
-			<hr class=" dark:border-gray-700" />
-
-			<button
-				class=" flex rounded-md py-2 px-3.5 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
-				on:click={() => {
-					const res = resetVectorDB(localStorage.token).catch((error) => {
-						toast.error(error);
-						return null;
-					});
-
-					if (res) {
-						toast.success('Success');
-					}
-				}}
-			>
-				<div class=" self-center mr-3">
-					<svg
-						xmlns="http://www.w3.org/2000/svg"
-						viewBox="0 0 16 16"
-						fill="currentColor"
-						class="w-4 h-4"
-					>
-						<path
-							fill-rule="evenodd"
-							d="M3.5 2A1.5 1.5 0 0 0 2 3.5v9A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 12.5 4H9.621a1.5 1.5 0 0 1-1.06-.44L7.439 2.44A1.5 1.5 0 0 0 6.38 2H3.5Zm6.75 7.75a.75.75 0 0 0 0-1.5h-4.5a.75.75 0 0 0 0 1.5h4.5Z"
-							clip-rule="evenodd"
-						/>
-					</svg>
+				<div class=" self-center text-sm font-medium">
+					{$i18n.t('Export All Chats (All Users)')}
 				</div>
-				<div class=" self-center text-sm font-medium">Reset Vector Storage</div>
 			</button>
 		{/if}
 	</div>

+ 16 - 13
src/lib/components/chat/Settings/Connections.svelte

@@ -1,6 +1,6 @@
 <script lang="ts">
 	import { models, user } from '$lib/stores';
-	import { createEventDispatcher, onMount } from 'svelte';
+	import { createEventDispatcher, onMount, getContext } from 'svelte';
 	const dispatch = createEventDispatcher();
 
 	import { getOllamaUrls, getOllamaVersion, updateOllamaUrls } from '$lib/apis/ollama';
@@ -12,6 +12,8 @@
 	} from '$lib/apis/openai';
 	import { toast } from 'svelte-sonner';
 
+	const i18n = getContext('i18n');
+
 	export let getModels: Function;
 
 	// External
@@ -42,7 +44,7 @@
 		});
 
 		if (ollamaVersion) {
-			toast.success('Server connection verified');
+			toast.success($i18n.t('Server connection verified'));
 			await models.set(await getModels());
 		}
 	};
@@ -63,17 +65,17 @@
 		dispatch('save');
 	}}
 >
-	<div class="  pr-1.5 overflow-y-scroll max-h-[20.5rem] space-y-3">
+	<div class="  pr-1.5 overflow-y-scroll max-h-[22rem] space-y-3">
 		<div class=" space-y-3">
 			<div class="mt-2 space-y-2 pr-1.5">
 				<div class="flex justify-between items-center text-sm">
-					<div class="  font-medium">OpenAI API</div>
+					<div class="  font-medium">{$i18n.t('OpenAI API')}</div>
 					<button
 						class=" text-xs font-medium text-gray-500"
 						type="button"
 						on:click={() => {
 							showOpenAI = !showOpenAI;
-						}}>{showOpenAI ? 'Hide' : 'Show'}</button
+						}}>{showOpenAI ? $i18n.t('Hide') : $i18n.t('Show')}</button
 					>
 				</div>
 
@@ -84,7 +86,7 @@
 								<div class="flex-1">
 									<input
 										class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-										placeholder="API Base URL"
+										placeholder={$i18n.t('API Base URL')}
 										bind:value={url}
 										autocomplete="off"
 									/>
@@ -93,7 +95,7 @@
 								<div class="flex-1">
 									<input
 										class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-										placeholder="API Key"
+										placeholder={$i18n.t('API Key')}
 										bind:value={OPENAI_API_KEYS[idx]}
 										autocomplete="off"
 									/>
@@ -143,7 +145,8 @@
 								</div>
 							</div>
 							<div class=" mb-1 text-xs text-gray-400 dark:text-gray-500">
-								WebUI will make requests to <span class=" text-gray-200">'{url}/models'</span>
+								{$i18n.t('WebUI will make requests to')}
+								<span class=" text-gray-200">'{url}/models'</span>
 							</div>
 						{/each}
 					</div>
@@ -154,7 +157,7 @@
 		<hr class=" dark:border-gray-700" />
 
 		<div>
-			<div class=" mb-2.5 text-sm font-medium">Ollama Base URL</div>
+			<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Ollama Base URL')}</div>
 			<div class="flex w-full gap-1.5">
 				<div class="flex-1 flex flex-col gap-2">
 					{#each OLLAMA_BASE_URLS as url, idx}
@@ -233,13 +236,13 @@
 			</div>
 
 			<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
-				Trouble accessing Ollama?
+				{$i18n.t('Trouble accessing Ollama?')}
 				<a
-					class=" text-gray-300 font-medium"
+					class=" text-gray-300 font-medium underline"
 					href="https://github.com/open-webui/open-webui#troubleshooting"
 					target="_blank"
 				>
-					Click here for help.
+					{$i18n.t('Click here for help.')}
 				</a>
 			</div>
 		</div>
@@ -250,7 +253,7 @@
 			class="  px-4 py-2 bg-emerald-700 hover:bg-emerald-800 text-gray-100 transition rounded-lg"
 			type="submit"
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </form>

+ 106 - 72
src/lib/components/chat/Settings/General.svelte

@@ -1,9 +1,12 @@
 <script lang="ts">
 	import { toast } from 'svelte-sonner';
-	import { createEventDispatcher, onMount } from 'svelte';
+	import { createEventDispatcher, onMount, getContext } from 'svelte';
+	import { getLanguages } from '$lib/i18n';
 	const dispatch = createEventDispatcher();
 
-	import { models, user } from '$lib/stores';
+	import { models, user, theme } from '$lib/stores';
+
+	const i18n = getContext('i18n');
 
 	import AdvancedParams from './Advanced/AdvancedParams.svelte';
 
@@ -11,8 +14,11 @@
 	export let getModels: Function;
 
 	// General
-	let themes = ['dark', 'light', 'rose-pine dark', 'rose-pine-dawn light'];
-	let theme = 'dark';
+	let themes = ['dark', 'light', 'rose-pine dark', 'rose-pine-dawn light', 'oled-dark'];
+	let selectedTheme = 'system';
+
+	let languages = [];
+	let lang = $i18n.language;
 	let notificationEnabled = false;
 	let system = '';
 
@@ -63,9 +69,11 @@
 	};
 
 	onMount(async () => {
+		selectedTheme = localStorage.theme ?? 'system';
+
 		let settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
+		languages = await getLanguages();
 
-		theme = localStorage.theme ?? 'dark';
 		notificationEnabled = settings.notificationEnabled ?? false;
 		system = settings.system ?? '';
 
@@ -81,77 +89,103 @@
 		options = { ...options, ...settings.options };
 		options.stop = (settings?.options?.stop ?? []).join(',');
 	});
+
+	const applyTheme = (_theme: string) => {
+		let themeToApply = _theme === 'oled-dark' ? 'dark' : _theme;
+
+		if (_theme === 'system') {
+			themeToApply = window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light';
+		}
+
+		if (themeToApply === 'dark' && !_theme.includes('oled')) {
+			document.documentElement.style.setProperty('--color-gray-900', '#171717');
+			document.documentElement.style.setProperty('--color-gray-950', '#0d0d0d');
+		}
+
+		themes
+			.filter((e) => e !== themeToApply)
+			.forEach((e) => {
+				e.split(' ').forEach((e) => {
+					document.documentElement.classList.remove(e);
+				});
+			});
+
+		themeToApply.split(' ').forEach((e) => {
+			document.documentElement.classList.add(e);
+		});
+
+		console.log(_theme);
+	};
+
+	const themeChangeHandler = (_theme: string) => {
+		theme.set(_theme);
+		localStorage.setItem('theme', _theme);
+		if (_theme.includes('oled')) {
+			document.documentElement.style.setProperty('--color-gray-900', '#000000');
+			document.documentElement.style.setProperty('--color-gray-950', '#000000');
+			document.documentElement.classList.add('dark');
+		}
+		applyTheme(_theme);
+	};
 </script>
 
 <div class="flex flex-col h-full justify-between text-sm">
-	<div class="  pr-1.5 overflow-y-scroll max-h-[20.5rem]">
+	<div class="  pr-1.5 overflow-y-scroll max-h-[22rem]">
 		<div class="">
-			<div class=" mb-1 text-sm font-medium">WebUI Settings</div>
+			<div class=" mb-1 text-sm font-medium">{$i18n.t('WebUI Settings')}</div>
 
-			<div class=" py-0.5 flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Theme</div>
+			<div class="flex w-full justify-between">
+				<div class=" self-center text-xs font-medium">{$i18n.t('Theme')}</div>
 				<div class="flex items-center relative">
-					<div class=" absolute right-16">
-						{#if theme === 'dark'}
-							<svg
-								xmlns="http://www.w3.org/2000/svg"
-								viewBox="0 0 20 20"
-								fill="currentColor"
-								class="w-4 h-4"
-							>
-								<path
-									fill-rule="evenodd"
-									d="M7.455 2.004a.75.75 0 01.26.77 7 7 0 009.958 7.967.75.75 0 011.067.853A8.5 8.5 0 116.647 1.921a.75.75 0 01.808.083z"
-									clip-rule="evenodd"
-								/>
-							</svg>
-						{:else if theme === 'light'}
-							<svg
-								xmlns="http://www.w3.org/2000/svg"
-								viewBox="0 0 20 20"
-								fill="currentColor"
-								class="w-4 h-4 self-center"
-							>
-								<path
-									d="M10 2a.75.75 0 01.75.75v1.5a.75.75 0 01-1.5 0v-1.5A.75.75 0 0110 2zM10 15a.75.75 0 01.75.75v1.5a.75.75 0 01-1.5 0v-1.5A.75.75 0 0110 15zM10 7a3 3 0 100 6 3 3 0 000-6zM15.657 5.404a.75.75 0 10-1.06-1.06l-1.061 1.06a.75.75 0 001.06 1.06l1.06-1.06zM6.464 14.596a.75.75 0 10-1.06-1.06l-1.06 1.06a.75.75 0 001.06 1.06l1.06-1.06zM18 10a.75.75 0 01-.75.75h-1.5a.75.75 0 010-1.5h1.5A.75.75 0 0118 10zM5 10a.75.75 0 01-.75.75h-1.5a.75.75 0 010-1.5h1.5A.75.75 0 015 10zM14.596 15.657a.75.75 0 001.06-1.06l-1.06-1.061a.75.75 0 10-1.06 1.06l1.06 1.06zM5.404 6.464a.75.75 0 001.06-1.06l-1.06-1.06a.75.75 0 10-1.061 1.06l1.06 1.06z"
-								/>
-							</svg>
-						{/if}
-					</div>
-
 					<select
-						class="w-fit pr-8 rounded py-2 px-2 text-xs bg-transparent outline-none text-right"
-						bind:value={theme}
+						class=" dark:bg-gray-900 w-fit pr-8 rounded py-2 px-2 text-xs bg-transparent outline-none text-right"
+						bind:value={selectedTheme}
 						placeholder="Select a theme"
-						on:change={(e) => {
-							localStorage.theme = theme;
-
-							themes
-								.filter((e) => e !== theme)
-								.forEach((e) => {
-									e.split(' ').forEach((e) => {
-										document.documentElement.classList.remove(e);
-									});
-								});
-
-							theme.split(' ').forEach((e) => {
-								document.documentElement.classList.add(e);
-							});
+						on:change={() => themeChangeHandler(selectedTheme)}
+					>
+						<option value="system">⚙️ {$i18n.t('System')}</option>
+						<option value="dark">🌑 {$i18n.t('Dark')}</option>
+						<option value="oled-dark">🌃 {$i18n.t('OLED Dark')}</option>
+						<option value="light">☀️ {$i18n.t('Light')}</option>
+						<option value="rose-pine dark">🪻 {$i18n.t('Rosé Pine')}</option>
+						<option value="rose-pine-dawn light">🌷 {$i18n.t('Rosé Pine Dawn')}</option>
+					</select>
+				</div>
+			</div>
 
-							console.log(theme);
+			<div class=" flex w-full justify-between">
+				<div class=" self-center text-xs font-medium">{$i18n.t('Language')}</div>
+				<div class="flex items-center relative">
+					<select
+						class=" dark:bg-gray-900 w-fit pr-8 rounded py-2 px-2 text-xs bg-transparent outline-none text-right"
+						bind:value={lang}
+						placeholder="Select a language"
+						on:change={(e) => {
+							$i18n.changeLanguage(lang);
 						}}
 					>
-						<option value="dark">Dark</option>
-						<option value="light">Light</option>
-						<option value="rose-pine dark">Rosé Pine</option>
-						<option value="rose-pine-dawn light">Rosé Pine Dawn</option>
+						{#each languages as language}
+							<option value={language['code']}>{language['title']}</option>
+						{/each}
 					</select>
 				</div>
 			</div>
+			{#if $i18n.language === 'en-US'}
+				<div class="mb-2 text-xs text-gray-400 dark:text-gray-500">
+					Couldn't find your language?
+					<a
+						class=" text-gray-300 font-medium underline"
+						href="https://github.com/open-webui/open-webui/blob/main/docs/CONTRIBUTING.md#-translations-and-internationalization"
+						target="_blank"
+					>
+						Help us translate Open WebUI!
+					</a>
+				</div>
+			{/if}
 
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">Notification</div>
+					<div class=" self-center text-xs font-medium">{$i18n.t('Desktop Notifications')}</div>
 
 					<button
 						class="p-1 px-3 text-xs flex rounded transition"
@@ -161,9 +195,9 @@
 						type="button"
 					>
 						{#if notificationEnabled === true}
-							<span class="ml-2 self-center">On</span>
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
 						{:else}
-							<span class="ml-2 self-center">Off</span>
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 						{/if}
 					</button>
 				</div>
@@ -173,7 +207,7 @@
 		<hr class=" dark:border-gray-700 my-3" />
 
 		<div>
-			<div class=" my-2.5 text-sm font-medium">System Prompt</div>
+			<div class=" my-2.5 text-sm font-medium">{$i18n.t('System Prompt')}</div>
 			<textarea
 				bind:value={system}
 				class="w-full rounded-lg p-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
@@ -183,13 +217,13 @@
 
 		<div class="mt-2 space-y-3 pr-1.5">
 			<div class="flex justify-between items-center text-sm">
-				<div class="  font-medium">Advanced Parameters</div>
+				<div class="  font-medium">{$i18n.t('Advanced Parameters')}</div>
 				<button
 					class=" text-xs font-medium text-gray-500"
 					type="button"
 					on:click={() => {
 						showAdvanced = !showAdvanced;
-					}}>{showAdvanced ? 'Hide' : 'Show'}</button
+					}}>{showAdvanced ? $i18n.t('Hide') : $i18n.t('Show')}</button
 				>
 			</div>
 
@@ -199,7 +233,7 @@
 
 				<div class=" py-1 w-full justify-between">
 					<div class="flex w-full justify-between">
-						<div class=" self-center text-xs font-medium">Keep Alive</div>
+						<div class=" self-center text-xs font-medium">{$i18n.t('Keep Alive')}</div>
 
 						<button
 							class="p-1 px-3 text-xs flex rounded transition"
@@ -209,9 +243,9 @@
 							}}
 						>
 							{#if keepAlive === null}
-								<span class="ml-2 self-center"> Default </span>
+								<span class="ml-2 self-center"> {$i18n.t('Default')} </span>
 							{:else}
-								<span class="ml-2 self-center"> Custom </span>
+								<span class="ml-2 self-center"> {$i18n.t('Custom')} </span>
 							{/if}
 						</button>
 					</div>
@@ -221,7 +255,7 @@
 							<input
 								class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
 								type="text"
-								placeholder={`e.g.) "30s","10m". Valid time units are "s", "m", "h".`}
+								placeholder={$i18n.t("e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.")}
 								bind:value={keepAlive}
 							/>
 						</div>
@@ -230,7 +264,7 @@
 
 				<div>
 					<div class=" py-1 flex w-full justify-between">
-						<div class=" self-center text-sm font-medium">Request Mode</div>
+						<div class=" self-center text-sm font-medium">{$i18n.t('Request Mode')}</div>
 
 						<button
 							class="p-1 px-3 text-xs flex rounded transition"
@@ -239,7 +273,7 @@
 							}}
 						>
 							{#if requestFormat === ''}
-								<span class="ml-2 self-center"> Default </span>
+								<span class="ml-2 self-center"> {$i18n.t('Default')} </span>
 							{:else if requestFormat === 'json'}
 								<!-- <svg
                             xmlns="http://www.w3.org/2000/svg"
@@ -251,7 +285,7 @@
                                 d="M10 2a.75.75 0 01.75.75v1.5a.75.75 0 01-1.5 0v-1.5A.75.75 0 0110 2zM10 15a.75.75 0 01.75.75v1.5a.75.75 0 01-1.5 0v-1.5A.75.75 0 0110 15zM10 7a3 3 0 100 6 3 3 0 000-6zM15.657 5.404a.75.75 0 10-1.06-1.06l-1.061 1.06a.75.75 0 001.06 1.06l1.06-1.06zM6.464 14.596a.75.75 0 10-1.06-1.06l-1.06 1.06a.75.75 0 001.06 1.06l1.06-1.06zM18 10a.75.75 0 01-.75.75h-1.5a.75.75 0 010-1.5h1.5A.75.75 0 0118 10zM5 10a.75.75 0 01-.75.75h-1.5a.75.75 0 010-1.5h1.5A.75.75 0 015 10zM14.596 15.657a.75.75 0 001.06-1.06l-1.06-1.061a.75.75 0 10-1.06 1.06l1.06 1.06zM5.404 6.464a.75.75 0 001.06-1.06l-1.06-1.06a.75.75 0 10-1.061 1.06l1.06 1.06z"
                             />
                         </svg> -->
-								<span class="ml-2 self-center"> JSON </span>
+								<span class="ml-2 self-center"> {$i18n.t('JSON')} </span>
 							{/if}
 						</button>
 					</div>
@@ -286,7 +320,7 @@
 				dispatch('save');
 			}}
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </div>

+ 115 - 46
src/lib/components/chat/Settings/Images.svelte

@@ -1,17 +1,17 @@
 <script lang="ts">
 	import { toast } from 'svelte-sonner';
 
-	import { createEventDispatcher, onMount } from 'svelte';
+	import { createEventDispatcher, onMount, getContext } from 'svelte';
 	import { config, user } from '$lib/stores';
 	import {
-		getAUTOMATIC1111Url,
 		getImageGenerationModels,
 		getDefaultImageGenerationModel,
 		updateDefaultImageGenerationModel,
 		getImageSize,
 		getImageGenerationConfig,
 		updateImageGenerationConfig,
-		updateAUTOMATIC1111Url,
+		getImageGenerationEngineUrls,
+		updateImageGenerationEngineUrls,
 		updateImageSize,
 		getImageSteps,
 		updateImageSteps,
@@ -21,6 +21,8 @@
 	import { getBackendConfig } from '$lib/apis';
 	const dispatch = createEventDispatcher();
 
+	const i18n = getContext('i18n');
+
 	export let saveSettings: Function;
 
 	let loading = false;
@@ -29,6 +31,8 @@
 	let enableImageGeneration = false;
 
 	let AUTOMATIC1111_BASE_URL = '';
+	let COMFYUI_BASE_URL = '';
+
 	let OPENAI_API_KEY = '';
 
 	let selectedModel = '';
@@ -47,24 +51,47 @@
 		});
 	};
 
-	const updateAUTOMATIC1111UrlHandler = async () => {
-		const res = await updateAUTOMATIC1111Url(localStorage.token, AUTOMATIC1111_BASE_URL).catch(
-			(error) => {
+	const updateUrlHandler = async () => {
+		if (imageGenerationEngine === 'comfyui') {
+			const res = await updateImageGenerationEngineUrls(localStorage.token, {
+				COMFYUI_BASE_URL: COMFYUI_BASE_URL
+			}).catch((error) => {
 				toast.error(error);
+
+				console.log(error);
 				return null;
-			}
-		);
+			});
 
-		if (res) {
-			AUTOMATIC1111_BASE_URL = res;
+			if (res) {
+				COMFYUI_BASE_URL = res.COMFYUI_BASE_URL;
 
-			await getModels();
+				await getModels();
 
-			if (models) {
-				toast.success('Server connection verified');
+				if (models) {
+					toast.success($i18n.t('Server connection verified'));
+				}
+			} else {
+				({ COMFYUI_BASE_URL } = await getImageGenerationEngineUrls(localStorage.token));
 			}
 		} else {
-			AUTOMATIC1111_BASE_URL = await getAUTOMATIC1111Url(localStorage.token);
+			const res = await updateImageGenerationEngineUrls(localStorage.token, {
+				AUTOMATIC1111_BASE_URL: AUTOMATIC1111_BASE_URL
+			}).catch((error) => {
+				toast.error(error);
+				return null;
+			});
+
+			if (res) {
+				AUTOMATIC1111_BASE_URL = res.AUTOMATIC1111_BASE_URL;
+
+				await getModels();
+
+				if (models) {
+					toast.success($i18n.t('Server connection verified'));
+				}
+			} else {
+				({ AUTOMATIC1111_BASE_URL } = await getImageGenerationEngineUrls(localStorage.token));
+			}
 		}
 	};
 	const updateImageGeneration = async () => {
@@ -99,7 +126,11 @@
 				imageGenerationEngine = res.engine;
 				enableImageGeneration = res.enabled;
 			}
-			AUTOMATIC1111_BASE_URL = await getAUTOMATIC1111Url(localStorage.token);
+			const URLS = await getImageGenerationEngineUrls(localStorage.token);
+
+			AUTOMATIC1111_BASE_URL = URLS.AUTOMATIC1111_BASE_URL;
+			COMFYUI_BASE_URL = URLS.COMFYUI_BASE_URL;
+
 			OPENAI_API_KEY = await getOpenAIKey(localStorage.token);
 
 			imageSize = await getImageSize(localStorage.token);
@@ -116,11 +147,13 @@
 	class="flex flex-col h-full justify-between space-y-3 text-sm"
 	on:submit|preventDefault={async () => {
 		loading = true;
-		await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
 
-		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
+		if (imageGenerationEngine === 'openai') {
+			await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
+		}
 
 		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
+
 		await updateImageSize(localStorage.token, imageSize).catch((error) => {
 			toast.error(error);
 			return null;
@@ -134,39 +167,45 @@
 		loading = false;
 	}}
 >
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[20.5rem]">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[24rem]">
 		<div>
-			<div class=" mb-1 text-sm font-medium">Image Settings</div>
+			<div class=" mb-1 text-sm font-medium">{$i18n.t('Image Settings')}</div>
 
 			<div class=" py-0.5 flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Image Generation Engine</div>
+				<div class=" self-center text-xs font-medium">{$i18n.t('Image Generation Engine')}</div>
 				<div class="flex items-center relative">
 					<select
 						class="w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
 						bind:value={imageGenerationEngine}
-						placeholder="Select a mode"
+						placeholder={$i18n.t('Select a mode')}
 						on:change={async () => {
 							await updateImageGeneration();
 						}}
 					>
-						<option value="">Default (Automatic1111)</option>
-						<option value="openai">Open AI (Dall-E)</option>
+						<option value="">{$i18n.t('Default (Automatic1111)')}</option>
+						<option value="comfyui">{$i18n.t('ComfyUI')}</option>
+						<option value="openai">{$i18n.t('Open AI (Dall-E)')}</option>
 					</select>
 				</div>
 			</div>
 
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">Image Generation (Experimental)</div>
+					<div class=" self-center text-xs font-medium">
+						{$i18n.t('Image Generation (Experimental)')}
+					</div>
 
 					<button
 						class="p-1 px-3 text-xs flex rounded transition"
 						on:click={() => {
 							if (imageGenerationEngine === '' && AUTOMATIC1111_BASE_URL === '') {
-								toast.error('AUTOMATIC1111 Base URL is required.');
+								toast.error($i18n.t('AUTOMATIC1111 Base URL is required.'));
+								enableImageGeneration = false;
+							} else if (imageGenerationEngine === 'comfyui' && COMFYUI_BASE_URL === '') {
+								toast.error($i18n.t('ComfyUI Base URL is required.'));
 								enableImageGeneration = false;
 							} else if (imageGenerationEngine === 'openai' && OPENAI_API_KEY === '') {
-								toast.error('OpenAI API Key is required.');
+								toast.error($i18n.t('OpenAI API Key is required.'));
 								enableImageGeneration = false;
 							} else {
 								enableImageGeneration = !enableImageGeneration;
@@ -177,9 +216,9 @@
 						type="button"
 					>
 						{#if enableImageGeneration === true}
-							<span class="ml-2 self-center">On</span>
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
 						{:else}
-							<span class="ml-2 self-center">Off</span>
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 						{/if}
 					</button>
 				</div>
@@ -188,22 +227,20 @@
 		<hr class=" dark:border-gray-700" />
 
 		{#if imageGenerationEngine === ''}
-			<div class=" mb-2.5 text-sm font-medium">AUTOMATIC1111 Base URL</div>
+			<div class=" mb-2.5 text-sm font-medium">{$i18n.t('AUTOMATIC1111 Base URL')}</div>
 			<div class="flex w-full">
 				<div class="flex-1 mr-2">
 					<input
 						class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-						placeholder="Enter URL (e.g. http://127.0.0.1:7860/)"
+						placeholder={$i18n.t('Enter URL (e.g. http://127.0.0.1:7860/)')}
 						bind:value={AUTOMATIC1111_BASE_URL}
 					/>
 				</div>
 				<button
-					class="px-3 bg-gray-200 hover:bg-gray-300 dark:bg-gray-600 dark:hover:bg-gray-700 rounded-lg transition"
+					class="px-2.5 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded-lg transition"
 					type="button"
 					on:click={() => {
-						// updateOllamaAPIUrlHandler();
-
-						updateAUTOMATIC1111UrlHandler();
+						updateUrlHandler();
 					}}
 				>
 					<svg
@@ -222,22 +259,53 @@
 			</div>
 
 			<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
-				Include `--api` flag when running stable-diffusion-webui
+				{$i18n.t('Include `--api` flag when running stable-diffusion-webui')}
 				<a
 					class=" text-gray-300 font-medium"
 					href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/3734"
 					target="_blank"
 				>
-					(e.g. `sh webui.sh --api`)
+					{$i18n.t('(e.g. `sh webui.sh --api`)')}
 				</a>
 			</div>
+		{:else if imageGenerationEngine === 'comfyui'}
+			<div class=" mb-2.5 text-sm font-medium">{$i18n.t('ComfyUI Base URL')}</div>
+			<div class="flex w-full">
+				<div class="flex-1 mr-2">
+					<input
+						class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+						placeholder={$i18n.t('Enter URL (e.g. http://127.0.0.1:7860/)')}
+						bind:value={COMFYUI_BASE_URL}
+					/>
+				</div>
+				<button
+					class="px-2.5 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded-lg transition"
+					type="button"
+					on:click={() => {
+						updateUrlHandler();
+					}}
+				>
+					<svg
+						xmlns="http://www.w3.org/2000/svg"
+						viewBox="0 0 20 20"
+						fill="currentColor"
+						class="w-4 h-4"
+					>
+						<path
+							fill-rule="evenodd"
+							d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
+							clip-rule="evenodd"
+						/>
+					</svg>
+				</button>
+			</div>
 		{:else if imageGenerationEngine === 'openai'}
-			<div class=" mb-2.5 text-sm font-medium">OpenAI API Key</div>
+			<div class=" mb-2.5 text-sm font-medium">{$i18n.t('OpenAI API Key')}</div>
 			<div class="flex w-full">
 				<div class="flex-1 mr-2">
 					<input
 						class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-						placeholder="Enter API Key"
+						placeholder={$i18n.t('Enter API Key')}
 						bind:value={OPENAI_API_KEY}
 					/>
 				</div>
@@ -248,16 +316,17 @@
 			<hr class=" dark:border-gray-700" />
 
 			<div>
-				<div class=" mb-2.5 text-sm font-medium">Set Default Model</div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Default Model')}</div>
 				<div class="flex w-full">
 					<div class="flex-1 mr-2">
 						<select
 							class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
 							bind:value={selectedModel}
-							placeholder="Select a model"
+							placeholder={$i18n.t('Select a model')}
+							required
 						>
 							{#if !selectedModel}
-								<option value="" disabled selected>Select a model</option>
+								<option value="" disabled selected>{$i18n.t('Select a model')}</option>
 							{/if}
 							{#each models ?? [] as model}
 								<option value={model.id} class="bg-gray-100 dark:bg-gray-700">{model.name}</option>
@@ -268,12 +337,12 @@
 			</div>
 
 			<div>
-				<div class=" mb-2.5 text-sm font-medium">Set Image Size</div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Image Size')}</div>
 				<div class="flex w-full">
 					<div class="flex-1 mr-2">
 						<input
 							class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-							placeholder="Enter Image Size (e.g. 512x512)"
+							placeholder={$i18n.t('Enter Image Size (e.g. 512x512)')}
 							bind:value={imageSize}
 						/>
 					</div>
@@ -281,12 +350,12 @@
 			</div>
 
 			<div>
-				<div class=" mb-2.5 text-sm font-medium">Set Steps</div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Steps')}</div>
 				<div class="flex w-full">
 					<div class="flex-1 mr-2">
 						<input
 							class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-							placeholder="Enter Number of Steps (e.g. 50)"
+							placeholder={$i18n.t('Enter Number of Steps (e.g. 50)')}
 							bind:value={steps}
 						/>
 					</div>
@@ -303,7 +372,7 @@
 			type="submit"
 			disabled={loading}
 		>
-			Save
+			{$i18n.t('Save')}
 
 			{#if loading}
 				<div class="ml-2 self-center">

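For reference, the two branches of the updateUrlHandler shown above collapse to the same shape: only the base URL belonging to the currently selected engine is persisted, and on failure the stored URLs are re-read from the backend. A minimal sketch under those assumptions; the API helper names come from the hunk itself, while the import path and the standalone wrapper are illustrative:

import { toast } from 'svelte-sonner';
// Assumed export path, matching the helpers the component above calls:
import {
	getImageGenerationEngineUrls,
	updateImageGenerationEngineUrls
} from '$lib/apis/images';

// Persist only the URL for the selected engine; fall back to the server-side
// value if the update fails.
const verifyEngineUrl = async (
	token: string,
	engine: string,
	urls: { AUTOMATIC1111_BASE_URL: string; COMFYUI_BASE_URL: string }
) => {
	const payload =
		engine === 'comfyui'
			? { COMFYUI_BASE_URL: urls.COMFYUI_BASE_URL }
			: { AUTOMATIC1111_BASE_URL: urls.AUTOMATIC1111_BASE_URL };

	const res = await updateImageGenerationEngineUrls(token, payload).catch((error) => {
		toast.error(error);
		return null;
	});

	// On failure, re-read whatever the backend currently has stored.
	return res ?? (await getImageGenerationEngineUrls(token));
};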
+ 75 - 34
src/lib/components/chat/Settings/Interface.svelte

@@ -1,17 +1,20 @@
 <script lang="ts">
 	import { getBackendConfig } from '$lib/apis';
 	import { setDefaultPromptSuggestions } from '$lib/apis/configs';
-	import { config, models, user } from '$lib/stores';
-	import { createEventDispatcher, onMount } from 'svelte';
+	import { config, models, settings, user } from '$lib/stores';
+	import { createEventDispatcher, onMount, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 	const dispatch = createEventDispatcher();
 
+	const i18n = getContext('i18n');
+
 	export let saveSettings: Function;
 
 	// Addons
 	let titleAutoGenerate = true;
 	let responseAutoCopy = false;
 	let titleAutoGenerateModel = '';
+	let titleAutoGenerateModelExternal = '';
 	let fullScreenMode = false;
 	let titleGenerationPrompt = '';
 
@@ -31,7 +34,12 @@
 
 	const toggleTitleAutoGenerate = async () => {
 		titleAutoGenerate = !titleAutoGenerate;
-		saveSettings({ titleAutoGenerate: titleAutoGenerate });
+		saveSettings({
+			title: {
+				...$settings.title,
+				auto: titleAutoGenerate
+			}
+		});
 	};
 
 	const toggleResponseAutoCopy = async () => {
@@ -63,8 +71,13 @@
 		}
 
 		saveSettings({
-			titleAutoGenerateModel: titleAutoGenerateModel !== '' ? titleAutoGenerateModel : undefined,
-			titleGenerationPrompt: titleGenerationPrompt ? titleGenerationPrompt : undefined
+			title: {
+				...$settings.title,
+				model: titleAutoGenerateModel !== '' ? titleAutoGenerateModel : undefined,
+				modelExternal:
+					titleAutoGenerateModelExternal !== '' ? titleAutoGenerateModelExternal : undefined,
+				prompt: titleGenerationPrompt ? titleGenerationPrompt : undefined
+			}
 		});
 	};
 
@@ -75,14 +88,18 @@
 
 		let settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
 
-		titleAutoGenerate = settings.titleAutoGenerate ?? true;
+		titleAutoGenerate = settings?.title?.auto ?? true;
+		titleAutoGenerateModel = settings?.title?.model ?? '';
+		titleAutoGenerateModelExternal = settings?.title?.modelExternal ?? '';
+		titleGenerationPrompt =
+			settings?.title?.prompt ??
+			$i18n.t(
+				"Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':"
+			) + ' {{prompt}}';
+
 		responseAutoCopy = settings.responseAutoCopy ?? false;
 		showUsername = settings.showUsername ?? false;
 		fullScreenMode = settings.fullScreenMode ?? false;
-		titleAutoGenerateModel = settings.titleAutoGenerateModel ?? '';
-		titleGenerationPrompt =
-			settings.titleGenerationPrompt ??
-			`Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title': {{prompt}}`;
 	});
 </script>
 
@@ -93,13 +110,13 @@
 		dispatch('save');
 	}}
 >
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll h-80">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[22rem]">
 		<div>
-			<div class=" mb-1 text-sm font-medium">WebUI Add-ons</div>
+			<div class=" mb-1 text-sm font-medium">{$i18n.t('WebUI Add-ons')}</div>
 
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">Title Auto-Generation</div>
+					<div class=" self-center text-xs font-medium">{$i18n.t('Title Auto-Generation')}</div>
 
 					<button
 						class="p-1 px-3 text-xs flex rounded transition"
@@ -109,9 +126,9 @@
 						type="button"
 					>
 						{#if titleAutoGenerate === true}
-							<span class="ml-2 self-center">On</span>
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
 						{:else}
-							<span class="ml-2 self-center">Off</span>
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 						{/if}
 					</button>
 				</div>
@@ -119,7 +136,9 @@
 
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">Response AutoCopy to Clipboard</div>
+					<div class=" self-center text-xs font-medium">
+						{$i18n.t('Response AutoCopy to Clipboard')}
+					</div>
 
 					<button
 						class="p-1 px-3 text-xs flex rounded transition"
@@ -129,9 +148,9 @@
 						type="button"
 					>
 						{#if responseAutoCopy === true}
-							<span class="ml-2 self-center">On</span>
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
 						{:else}
-							<span class="ml-2 self-center">Off</span>
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 						{/if}
 					</button>
 				</div>
@@ -139,7 +158,7 @@
 
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">Full Screen Mode</div>
+					<div class=" self-center text-xs font-medium">{$i18n.t('Full Screen Mode')}</div>
 
 					<button
 						class="p-1 px-3 text-xs flex rounded transition"
@@ -149,9 +168,9 @@
 						type="button"
 					>
 						{#if fullScreenMode === true}
-							<span class="ml-2 self-center">On</span>
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
 						{:else}
-							<span class="ml-2 self-center">Off</span>
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 						{/if}
 					</button>
 				</div>
@@ -160,7 +179,7 @@
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
 					<div class=" self-center text-xs font-medium">
-						Display the username instead of "You" in the Chat
+						{$i18n.t('Display the username instead of You in the Chat')}
 					</div>
 
 					<button
@@ -171,9 +190,9 @@
 						type="button"
 					>
 						{#if showUsername === true}
-							<span class="ml-2 self-center">On</span>
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
 						{:else}
-							<span class="ml-2 self-center">Off</span>
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
 						{/if}
 					</button>
 				</div>
@@ -183,15 +202,16 @@
 		<hr class=" dark:border-gray-700" />
 
 		<div>
-			<div class=" mb-2.5 text-sm font-medium">Set Title Auto-Generation Model</div>
-			<div class="flex w-full">
-				<div class="flex-1 mr-2">
+			<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Title Auto-Generation Model')}</div>
+			<div class="flex w-full gap-2 pr-2">
+				<div class="flex-1">
+					<div class=" text-xs mb-1">Local Models</div>
 					<select
 						class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
 						bind:value={titleAutoGenerateModel}
-						placeholder="Select a model"
+						placeholder={$i18n.t('Select a model')}
 					>
-						<option value="" selected>Current Model</option>
+						<option value="" selected>{$i18n.t('Current Model')}</option>
 						{#each $models as model}
 							{#if model.size != null}
 								<option value={model.name} class="bg-gray-100 dark:bg-gray-700">
@@ -201,9 +221,28 @@
 						{/each}
 					</select>
 				</div>
+
+				<div class="flex-1">
+					<div class=" text-xs mb-1">External Models</div>
+					<select
+						class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+						bind:value={titleAutoGenerateModelExternal}
+						placeholder={$i18n.t('Select a model')}
+					>
+						<option value="" selected>{$i18n.t('Current Model')}</option>
+						{#each $models as model}
+							{#if model.name !== 'hr'}
+								<option value={model.name} class="bg-gray-100 dark:bg-gray-700">
+									{model.name}
+								</option>
+							{/if}
+						{/each}
+					</select>
+				</div>
 			</div>
+
 			<div class="mt-3 mr-2">
-				<div class=" mb-2.5 text-sm font-medium">Title Generation Prompt</div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Title Generation Prompt')}</div>
 				<textarea
 					bind:value={titleGenerationPrompt}
 					class="w-full rounded-lg p-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
@@ -217,7 +256,9 @@
 
 			<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
 				<div class="flex w-full justify-between mb-2">
-					<div class=" self-center text-sm font-semibold">Default Prompt Suggestions</div>
+					<div class=" self-center text-sm font-semibold">
+						{$i18n.t('Default Prompt Suggestions')}
+					</div>
 
 					<button
 						class="p-1 px-3 text-xs flex rounded transition"
@@ -290,19 +331,19 @@
 
 				{#if promptSuggestions.length > 0}
 					<div class="text-xs text-left w-full mt-2">
-						Adjusting these settings will apply changes universally to all users.
+						{$i18n.t('Adjusting these settings will apply changes universally to all users.')}
 					</div>
 				{/if}
 			</div>
 		{/if}
 	</div>
 
-	<div class="flex justify-end pt-3 text-sm font-medium">
+	<div class="flex justify-end text-sm font-medium">
 		<button
 			class=" px-4 py-2 bg-emerald-700 hover:bg-emerald-800 text-gray-100 transition rounded-lg"
 			type="submit"
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </form>

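The Interface.svelte changes above migrate the flat titleAutoGenerate, titleAutoGenerateModel, and titleGenerationPrompt settings into a nested `title` object and add a separate modelExternal field. A sketch of the resulting shape and the defaults the component reads on mount; the field names come from the diff, the interface itself is illustrative:

interface TitleSettings {
	auto?: boolean;         // was the flat `titleAutoGenerate`
	model?: string;         // local model used for title auto-generation
	modelExternal?: string; // external (OpenAI/LiteLLM) model, new in this change
	prompt?: string;        // was the flat `titleGenerationPrompt`
}

const stored = JSON.parse(localStorage.getItem('settings') ?? '{}');
const title: TitleSettings = stored?.title ?? {};

const titleAutoGenerate = title.auto ?? true;            // defaults to on
const titleAutoGenerateModel = title.model ?? '';         // '' means "use current model"
const titleAutoGenerateModelExternal = title.modelExternal ?? '';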
+ 216 - 97
src/lib/components/chat/Settings/Models.svelte

@@ -5,17 +5,22 @@
 	import {
 		createModel,
 		deleteModel,
+		downloadModel,
 		getOllamaUrls,
 		getOllamaVersion,
-		pullModel
+		pullModel,
+		cancelOllamaRequest,
+		uploadModel
 	} from '$lib/apis/ollama';
 	import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
 	import { WEBUI_NAME, models, user } from '$lib/stores';
 	import { splitStream } from '$lib/utils';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 	import { addLiteLLMModel, deleteLiteLLMModel, getLiteLLMModelInfo } from '$lib/apis/litellm';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let getModels: Function;
 
 	let showLiteLLM = false;
@@ -58,11 +63,13 @@
 	let pullProgress = null;
 
 	let modelUploadMode = 'file';
-	let modelInputFile = '';
+	let modelInputFile: File[] | null = null;
 	let modelFileUrl = '';
 	let modelFileContent = `TEMPLATE """{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: """\nPARAMETER num_ctx 4096\nPARAMETER stop "</s>"\nPARAMETER stop "USER:"\nPARAMETER stop "ASSISTANT:"`;
 	let modelFileDigest = '';
+
 	let uploadProgress = null;
+	let uploadMessage = '';
 
 	let deleteModelTag = '';
 
@@ -134,11 +141,17 @@
 	const pullModelHandler = async () => {
 		const sanitizedModelTag = modelTag.trim();
 		if (modelDownloadStatus[sanitizedModelTag]) {
-			toast.error(`Model '${sanitizedModelTag}' is already in queue for downloading.`);
+			toast.error(
+				$i18n.t(`Model '{{modelTag}}' is already in queue for downloading.`, {
+					modelTag: sanitizedModelTag
+				})
+			);
 			return;
 		}
 		if (Object.keys(modelDownloadStatus).length === 3) {
-			toast.error('Maximum of 3 models can be downloaded simultaneously. Please try again later.');
+			toast.error(
+				$i18n.t('Maximum of 3 models can be downloaded simultaneously. Please try again later.')
+			);
 			return;
 		}
 
@@ -151,15 +164,17 @@
 				// Remove the downloaded model
 				delete modelDownloadStatus[modelName];
 
-				console.log(data);
+				modelDownloadStatus = { ...modelDownloadStatus };
 
 				if (!data.success) {
 					toast.error(data.error);
 				} else {
-					toast.success(`Model '${modelName}' has been successfully downloaded.`);
+					toast.success(
+						$i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, { modelName })
+					);
 
 					const notification = new Notification($WEBUI_NAME, {
-						body: `Model '${modelName}' has been successfully downloaded.`,
+						body: $i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, { modelName }),
 						icon: `${WEBUI_BASE_URL}/static/favicon.png`
 					});
 
@@ -174,35 +189,32 @@
 
 	const uploadModelHandler = async () => {
 		modelTransferring = true;
-		uploadProgress = 0;
 
 		let uploaded = false;
 		let fileResponse = null;
 		let name = '';
 
 		if (modelUploadMode === 'file') {
-			const file = modelInputFile[0];
-			const formData = new FormData();
-			formData.append('file', file);
-
-			fileResponse = await fetch(`${WEBUI_API_BASE_URL}/utils/upload`, {
-				method: 'POST',
-				headers: {
-					...($user && { Authorization: `Bearer ${localStorage.token}` })
-				},
-				body: formData
-			}).catch((error) => {
-				console.log(error);
-				return null;
-			});
+			const file = modelInputFile ? modelInputFile[0] : null;
+
+			if (file) {
+				uploadMessage = 'Uploading...';
+
+				fileResponse = await uploadModel(localStorage.token, file, selectedOllamaUrlIdx).catch(
+					(error) => {
+						toast.error(error);
+						return null;
+					}
+				);
+			}
 		} else {
-			fileResponse = await fetch(`${WEBUI_API_BASE_URL}/utils/download?url=${modelFileUrl}`, {
-				method: 'GET',
-				headers: {
-					...($user && { Authorization: `Bearer ${localStorage.token}` })
-				}
-			}).catch((error) => {
-				console.log(error);
+			uploadProgress = 0;
+			fileResponse = await downloadModel(
+				localStorage.token,
+				modelFileUrl,
+				selectedOllamaUrlIdx
+			).catch((error) => {
+				toast.error(error);
 				return null;
 			});
 		}
@@ -225,6 +237,9 @@
 							let data = JSON.parse(line.replace(/^data: /, ''));
 
 							if (data.progress) {
+								if (uploadMessage) {
+									uploadMessage = '';
+								}
 								uploadProgress = data.progress;
 							}
 
@@ -243,6 +258,9 @@
 					console.log(error);
 				}
 			}
+		} else {
+			const error = await fileResponse?.json();
+			toast.error(error?.detail ?? error);
 		}
 
 		if (uploaded) {
@@ -308,7 +326,11 @@
 		}
 
 		modelFileUrl = '';
-		modelInputFile = '';
+
+		if (modelUploadInputElement) {
+			modelUploadInputElement.value = '';
+		}
+		modelInputFile = null;
 		modelTransferring = false;
 		uploadProgress = null;
 
@@ -323,7 +345,7 @@
 		);
 
 		if (res) {
-			toast.success(`Deleted ${deleteModelTag}`);
+			toast.success($i18n.t(`Deleted {{deleteModelTag}}`, { deleteModelTag }));
 		}
 
 		deleteModelTag = '';
@@ -354,12 +376,24 @@
 					for (const line of lines) {
 						if (line !== '') {
 							let data = JSON.parse(line);
+							console.log(data);
 							if (data.error) {
 								throw data.error;
 							}
 							if (data.detail) {
 								throw data.detail;
 							}
+
+							if (data.id) {
+								modelDownloadStatus[opts.modelName] = {
+									...modelDownloadStatus[opts.modelName],
+									requestId: data.id,
+									reader,
+									done: false
+								};
+								console.log(data);
+							}
+
 							if (data.status) {
 								if (data.digest) {
 									let downloadProgress = 0;
@@ -369,11 +403,17 @@
 										downloadProgress = 100;
 									}
 									modelDownloadStatus[opts.modelName] = {
+										...modelDownloadStatus[opts.modelName],
 										pullProgress: downloadProgress,
 										digest: data.digest
 									};
 								} else {
 									toast.success(data.status);
+
+									modelDownloadStatus[opts.modelName] = {
+										...modelDownloadStatus[opts.modelName],
+										done: data.status === 'success'
+									};
 								}
 							}
 						}
@@ -386,7 +426,14 @@
 					opts.callback({ success: false, error, modelName: opts.modelName });
 				}
 			}
-			opts.callback({ success: true, modelName: opts.modelName });
+
+			console.log(modelDownloadStatus[opts.modelName]);
+
+			if (modelDownloadStatus[opts.modelName].done) {
+				opts.callback({ success: true, modelName: opts.modelName });
+			} else {
+				opts.callback({ success: false, error: 'Download canceled', modelName: opts.modelName });
+			}
 		}
 	};
 
@@ -410,7 +457,7 @@
 				}
 			}
 		} else {
-			toast.error(`Model ${liteLLMModelName} already exists.`);
+			toast.error($i18n.t(`Model {{modelName}} already exists.`, { modelName: liteLLMModelName }));
 		}
 
 		liteLLMModelName = '';
@@ -456,13 +503,25 @@
 		ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
 		liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
 	});
+
+	const cancelModelPullHandler = async (model: string) => {
+		const { reader, requestId } = modelDownloadStatus[model];
+		if (reader) {
+			await reader.cancel();
+
+			await cancelOllamaRequest(localStorage.token, requestId);
+			delete modelDownloadStatus[model];
+			await deleteModel(localStorage.token, model);
+			toast.success(`${model} download has been canceled`);
+		}
+	};
 </script>
 
 <div class="flex flex-col h-full justify-between text-sm">
-	<div class=" space-y-3 pr-1.5 overflow-y-scroll h-[23rem]">
+	<div class=" space-y-3 pr-1.5 overflow-y-scroll h-[24rem]">
 		{#if ollamaVersion}
 			<div class="space-y-2 pr-1.5">
-				<div class="text-sm font-medium">Manage Ollama Models</div>
+				<div class="text-sm font-medium">{$i18n.t('Manage Ollama Models')}</div>
 
 				{#if OLLAMA_URLS.length > 0}
 					<div class="flex gap-2">
@@ -470,7 +529,7 @@
 							<select
 								class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
 								bind:value={selectedOllamaUrlIdx}
-								placeholder="Select an Ollama instance"
+								placeholder={$i18n.t('Select an Ollama instance')}
 							>
 								{#each OLLAMA_URLS as url, idx}
 									<option value={idx} class="bg-gray-100 dark:bg-gray-700">{url}</option>
@@ -513,12 +572,14 @@
 
 				<div class="space-y-2">
 					<div>
-						<div class=" mb-2 text-sm font-medium">Pull a model from Ollama.com</div>
+						<div class=" mb-2 text-sm font-medium">{$i18n.t('Pull a model from Ollama.com')}</div>
 						<div class="flex w-full">
 							<div class="flex-1 mr-2">
 								<input
 									class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-									placeholder="Enter model tag (e.g. mistral:7b)"
+									placeholder={$i18n.t('Enter model tag (e.g. {{modelTag}})', {
+										modelTag: 'mistral:7b'
+									})}
 									bind:value={modelTag}
 								/>
 							</div>
@@ -573,47 +634,84 @@
 							</button>
 						</div>
 
-						<div>
-							<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
-								To access the available model names for downloading, <a
-									class=" text-gray-500 dark:text-gray-300 font-medium underline"
-									href="https://ollama.com/library"
-									target="_blank">click here.</a
-								>
-							</div>
+						<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
+							{$i18n.t('To access the available model names for downloading,')}
+							<a
+								class=" text-gray-500 dark:text-gray-300 font-medium underline"
+								href="https://ollama.com/library"
+								target="_blank">{$i18n.t('click here.')}</a
+							>
 						</div>
 
 						{#if Object.keys(modelDownloadStatus).length > 0}
 							{#each Object.keys(modelDownloadStatus) as model}
-								<div class="flex flex-col">
-									<div class="font-medium mb-1">{model}</div>
-									<div class="">
-										<div
-											class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
-											style="width: {Math.max(15, modelDownloadStatus[model].pullProgress ?? 0)}%"
-										>
-											{modelDownloadStatus[model].pullProgress ?? 0}%
-										</div>
-										<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
-											{modelDownloadStatus[model].digest}
+								{#if 'pullProgress' in modelDownloadStatus[model]}
+									<div class="flex flex-col">
+										<div class="font-medium mb-1">{model}</div>
+										<div class="">
+											<div class="flex flex-row justify-between space-x-4 pr-2">
+												<div class=" flex-1">
+													<div
+														class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
+														style="width: {Math.max(
+															15,
+															modelDownloadStatus[model].pullProgress ?? 0
+														)}%"
+													>
+														{modelDownloadStatus[model].pullProgress ?? 0}%
+													</div>
+												</div>
+
+												<Tooltip content="Cancel">
+													<button
+														class="text-gray-800 dark:text-gray-100"
+														on:click={() => {
+															cancelModelPullHandler(model);
+														}}
+													>
+														<svg
+															class="w-4 h-4 text-gray-800 dark:text-white"
+															aria-hidden="true"
+															xmlns="http://www.w3.org/2000/svg"
+															width="24"
+															height="24"
+															fill="currentColor"
+															viewBox="0 0 24 24"
+														>
+															<path
+																stroke="currentColor"
+																stroke-linecap="round"
+																stroke-linejoin="round"
+																stroke-width="2"
+																d="M6 18 17.94 6M18 18 6.06 6"
+															/>
+														</svg>
+													</button>
+												</Tooltip>
+											</div>
+											{#if 'digest' in modelDownloadStatus[model]}
+												<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
+													{modelDownloadStatus[model].digest}
+												</div>
+											{/if}
 										</div>
 									</div>
-								</div>
+								{/if}
 							{/each}
 						{/if}
 					</div>
 
 					<div>
-						<div class=" mb-2 text-sm font-medium">Delete a model</div>
+						<div class=" mb-2 text-sm font-medium">{$i18n.t('Delete a model')}</div>
 						<div class="flex w-full">
 							<div class="flex-1 mr-2">
 								<select
 									class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
 									bind:value={deleteModelTag}
-									placeholder="Select a model"
+									placeholder={$i18n.t('Select a model')}
 								>
 									{#if !deleteModelTag}
-										<option value="" disabled selected>Select a model</option>
+										<option value="" disabled selected>{$i18n.t('Select a model')}</option>
 									{/if}
 									{#each $models.filter((m) => m.size != null && (selectedOllamaUrlIdx === null ? true : (m?.urls ?? []).includes(selectedOllamaUrlIdx))) as model}
 										<option value={model.name} class="bg-gray-100 dark:bg-gray-700"
@@ -646,13 +744,13 @@
 
 					<div class="pt-1">
 						<div class="flex justify-between items-center text-xs">
-							<div class=" text-sm font-medium">Experimental</div>
+							<div class=" text-sm font-medium">{$i18n.t('Experimental')}</div>
 							<button
 								class=" text-xs font-medium text-gray-500"
 								type="button"
 								on:click={() => {
 									showExperimentalOllama = !showExperimentalOllama;
-								}}>{showExperimentalOllama ? 'Hide' : 'Show'}</button
+								}}>{showExperimentalOllama ? $i18n.t('Hide') : $i18n.t('Show')}</button
 							>
 						</div>
 					</div>
@@ -664,7 +762,7 @@
 							}}
 						>
 							<div class=" mb-2 flex w-full justify-between">
-								<div class="  text-sm font-medium">Upload a GGUF model</div>
+								<div class="  text-sm font-medium">{$i18n.t('Upload a GGUF model')}</div>
 
 								<button
 									class="p-1 px-3 text-xs flex rounded transition"
@@ -678,9 +776,9 @@
 									type="button"
 								>
 									{#if modelUploadMode === 'file'}
-										<span class="ml-2 self-center">File Mode</span>
+										<span class="ml-2 self-center">{$i18n.t('File Mode')}</span>
 									{:else}
-										<span class="ml-2 self-center">URL Mode</span>
+										<span class="ml-2 self-center">{$i18n.t('URL Mode')}</span>
 									{/if}
 								</button>
 							</div>
@@ -704,7 +802,7 @@
 
 											<button
 												type="button"
-												class="w-full rounded-lg text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-850"
+												class="w-full rounded-lg text-left py-2 px-4 bg-white dark:text-gray-300 dark:bg-gray-850"
 												on:click={() => {
 													modelUploadInputElement.click();
 												}}
@@ -712,21 +810,21 @@
 												{#if modelInputFile && modelInputFile.length > 0}
 													{modelInputFile[0].name}
 												{:else}
-													Click here to select
+													{$i18n.t('Click here to select')}
 												{/if}
 											</button>
 										</div>
 									{:else}
 										<div class="flex-1 {modelFileUrl !== '' ? 'mr-2' : ''}">
 											<input
-												class="w-full rounded-lg text-left py-2 px-4 dark:text-gray-300 dark:bg-gray-850 outline-none {modelFileUrl !==
+												class="w-full rounded-lg text-left py-2 px-4 bg-white dark:text-gray-300 dark:bg-gray-850 outline-none {modelFileUrl !==
 												''
 													? 'mr-2'
 													: ''}"
 												type="url"
 												required
 												bind:value={modelFileUrl}
-												placeholder="Type Hugging Face Resolve (Download) URL"
+												placeholder={$i18n.t('Type Hugging Face Resolve (Download) URL')}
 											/>
 										</div>
 									{/if}
@@ -734,7 +832,7 @@
 
 								{#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')}
 									<button
-										class="px-3 text-gray-100 bg-emerald-600 hover:bg-emerald-700 disabled:bg-gray-700 disabled:cursor-not-allowed rounded transition"
+										class="px-2.5 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded-lg disabled:cursor-not-allowed transition"
 										type="submit"
 										disabled={modelTransferring}
 									>
@@ -786,26 +884,43 @@
 							{#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')}
 								<div>
 									<div>
-										<div class=" my-2.5 text-sm font-medium">Modelfile Content</div>
+										<div class=" my-2.5 text-sm font-medium">{$i18n.t('Modelfile Content')}</div>
 										<textarea
 											bind:value={modelFileContent}
-											class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none resize-none"
+											class="w-full rounded-lg py-2 px-4 text-sm bg-gray-100 dark:text-gray-100 dark:bg-gray-850 outline-none resize-none"
 											rows="6"
 										/>
 									</div>
 								</div>
 							{/if}
 							<div class=" mt-1 text-xs text-gray-400 dark:text-gray-500">
-								To access the GGUF models available for downloading, <a
+								{$i18n.t('To access the GGUF models available for downloading,')}
+								<a
 									class=" text-gray-500 dark:text-gray-300 font-medium underline"
 									href="https://huggingface.co/models?search=gguf"
-									target="_blank">click here.</a
+									target="_blank">{$i18n.t('click here.')}</a
 								>
 							</div>
 
-							{#if uploadProgress !== null}
+							{#if uploadMessage}
+								<div class="mt-2">
+									<div class=" mb-2 text-xs">{$i18n.t('Upload Progress')}</div>
+
+									<div class="w-full rounded-full dark:bg-gray-800">
+										<div
+											class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
+											style="width: 100%"
+										>
+											{uploadMessage}
+										</div>
+									</div>
+									<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
+										{modelFileDigest}
+									</div>
+								</div>
+							{:else if uploadProgress !== null}
 								<div class="mt-2">
-									<div class=" mb-2 text-xs">Upload Progress</div>
+									<div class=" mb-2 text-xs">{$i18n.t('Upload Progress')}</div>
 
 									<div class="w-full rounded-full dark:bg-gray-800">
 										<div
@@ -832,13 +947,13 @@
 				<div>
 					<div class="mb-2">
 						<div class="flex justify-between items-center text-xs">
-							<div class=" text-sm font-medium">Manage LiteLLM Models</div>
+							<div class=" text-sm font-medium">{$i18n.t('Manage LiteLLM Models')}</div>
 							<button
 								class=" text-xs font-medium text-gray-500"
 								type="button"
 								on:click={() => {
 									showLiteLLM = !showLiteLLM;
-								}}>{showLiteLLM ? 'Hide' : 'Show'}</button
+								}}>{showLiteLLM ? $i18n.t('Hide') : $i18n.t('Show')}</button
 							>
 						</div>
 					</div>
@@ -846,14 +961,16 @@
 					{#if showLiteLLM}
 						<div>
 							<div class="flex justify-between items-center text-xs">
-								<div class=" text-sm font-medium">Add a model</div>
+								<div class=" text-sm font-medium">{$i18n.t('Add a model')}</div>
 								<button
 									class=" text-xs font-medium text-gray-500"
 									type="button"
 									on:click={() => {
 										showLiteLLMParams = !showLiteLLMParams;
 									}}
-									>{showLiteLLMParams ? 'Hide Additional Params' : 'Show Additional Params'}</button
+									>{showLiteLLMParams
+										? $i18n.t('Hide Additional Params')
+										: $i18n.t('Show Additional Params')}</button
 								>
 							</div>
 						</div>
@@ -863,7 +980,7 @@
 								<div class="flex-1 mr-2">
 									<input
 										class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-										placeholder="Enter LiteLLM Model (litellm_params.model)"
+										placeholder={$i18n.t('Enter LiteLLM Model (litellm_params.model)')}
 										bind:value={liteLLMModel}
 										autocomplete="off"
 									/>
@@ -890,7 +1007,7 @@
 
 							{#if showLiteLLMParams}
 								<div>
-									<div class=" mb-1.5 text-sm font-medium">Model Name</div>
+									<div class=" mb-1.5 text-sm font-medium">{$i18n.t('Model Name')}</div>
 									<div class="flex w-full">
 										<div class="flex-1">
 											<input
@@ -904,12 +1021,14 @@
 								</div>
 
 								<div>
-									<div class=" mb-1.5 text-sm font-medium">API Base URL</div>
+									<div class=" mb-1.5 text-sm font-medium">{$i18n.t('API Base URL')}</div>
 									<div class="flex w-full">
 										<div class="flex-1">
 											<input
 												class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-												placeholder="Enter LiteLLM API Base URL (litellm_params.api_base)"
+												placeholder={$i18n.t(
+													'Enter LiteLLM API Base URL (litellm_params.api_base)'
+												)}
 												bind:value={liteLLMAPIBase}
 												autocomplete="off"
 											/>
@@ -918,12 +1037,12 @@
 								</div>
 
 								<div>
-									<div class=" mb-1.5 text-sm font-medium">API Key</div>
+									<div class=" mb-1.5 text-sm font-medium">{$i18n.t('API Key')}</div>
 									<div class="flex w-full">
 										<div class="flex-1">
 											<input
 												class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-												placeholder="Enter LiteLLM API Key (litellm_params.api_key)"
+												placeholder={$i18n.t('Enter LiteLLM API Key (litellm_params.api_key)')}
 												bind:value={liteLLMAPIKey}
 												autocomplete="off"
 											/>
@@ -932,12 +1051,12 @@
 								</div>
 
 								<div>
-									<div class="mb-1.5 text-sm font-medium">API RPM</div>
+									<div class="mb-1.5 text-sm font-medium">{$i18n.t('API RPM')}</div>
 									<div class="flex w-full">
 										<div class="flex-1">
 											<input
 												class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-												placeholder="Enter LiteLLM API RPM (litellm_params.rpm)"
+												placeholder={$i18n.t('Enter LiteLLM API RPM (litellm_params.rpm)')}
 												bind:value={liteLLMRPM}
 												autocomplete="off"
 											/>
@@ -946,12 +1065,12 @@
 								</div>
 
 								<div>
-									<div class="mb-1.5 text-sm font-medium">Max Tokens</div>
+									<div class="mb-1.5 text-sm font-medium">{$i18n.t('Max Tokens')}</div>
 									<div class="flex w-full">
 										<div class="flex-1">
 											<input
 												class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-												placeholder="Enter Max Tokens (litellm_params.max_tokens)"
+												placeholder={$i18n.t('Enter Max Tokens (litellm_params.max_tokens)')}
 												bind:value={liteLLMMaxTokens}
 												type="number"
 												min="1"
@@ -964,27 +1083,27 @@
 						</div>
 
 						<div class="mb-2 text-xs text-gray-400 dark:text-gray-500">
-							Not sure what to add?
+							{$i18n.t('Not sure what to add?')}
 							<a
 								class=" text-gray-300 font-medium underline"
 								href="https://litellm.vercel.app/docs/proxy/configs#quick-start"
 								target="_blank"
 							>
-								Click here for help.
+								{$i18n.t('Click here for help.')}
 							</a>
 						</div>
 
 						<div>
-							<div class=" mb-2.5 text-sm font-medium">Delete a model</div>
+							<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Delete a model')}</div>
 							<div class="flex w-full">
 								<div class="flex-1 mr-2">
 									<select
 										class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
 										bind:value={deleteLiteLLMModelId}
-										placeholder="Select a model"
+										placeholder={$i18n.t('Select a model')}
 									>
 										{#if !deleteLiteLLMModelId}
-											<option value="" disabled selected>Select a model</option>
+											<option value="" disabled selected>{$i18n.t('Select a model')}</option>
 										{/if}
 										{#each liteLLMModelInfo as model}
 											<option value={model.model_info.id} class="bg-gray-100 dark:bg-gray-700"

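The Models.svelte hunks above add a cancel path for in-flight pulls: each entry in modelDownloadStatus now carries the stream reader and the backend requestId, and cancelModelPullHandler tears both down and removes the partial model. A condensed sketch of that flow, reusing the $lib/apis/ollama helpers imported at the top of the file; the wrapper function and the status type here are illustrative:

import { cancelOllamaRequest, deleteModel } from '$lib/apis/ollama';

type DownloadStatus = {
	requestId?: string; // id streamed back by the backend when the pull starts
	reader?: ReadableStreamDefaultReader<Uint8Array>;
	pullProgress?: number;
	digest?: string;
	done?: boolean;
};

const cancelPull = async (
	token: string,
	model: string,
	status: Record<string, DownloadStatus>
) => {
	const { reader, requestId } = status[model] ?? {};
	if (!reader) return;

	await reader.cancel(); // stop consuming the pull stream client-side
	if (requestId) {
		await cancelOllamaRequest(token, requestId); // abort the request server-side
	}
	delete status[model];
	await deleteModel(token, model); // clean up any partially downloaded layers
};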
+ 20 - 17
src/lib/components/chat/SettingsModal.svelte

@@ -1,4 +1,5 @@
 <script lang="ts">
+	import { getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 	import { models, settings, user } from '$lib/stores';
 
@@ -17,6 +18,8 @@
 	import Connections from './Settings/Connections.svelte';
 	import Images from './Settings/Images.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let show = false;
 
 	const saveSettings = async (updated) => {
@@ -58,7 +61,7 @@
 <Modal bind:show>
 	<div>
 		<div class=" flex justify-between dark:text-gray-300 px-5 py-4">
-			<div class=" text-lg font-medium self-center">Settings</div>
+			<div class=" text-lg font-medium self-center">{$i18n.t('Settings')}</div>
 			<button
 				class="self-center"
 				on:click={() => {
@@ -106,7 +109,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">General</div>
+					<div class=" self-center">{$i18n.t('General')}</div>
 				</button>
 
 				{#if $user?.role === 'admin'}
@@ -131,7 +134,7 @@
 								/>
 							</svg>
 						</div>
-						<div class=" self-center">Connections</div>
+						<div class=" self-center">{$i18n.t('Connections')}</div>
 					</button>
 
 					<button
@@ -157,7 +160,7 @@
 								/>
 							</svg>
 						</div>
-						<div class=" self-center">Models</div>
+						<div class=" self-center">{$i18n.t('Models')}</div>
 					</button>
 				{/if}
 
@@ -184,7 +187,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">Interface</div>
+					<div class=" self-center">{$i18n.t('Interface')}</div>
 				</button>
 
 				<button
@@ -211,7 +214,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">Audio</div>
+					<div class=" self-center">{$i18n.t('Audio')}</div>
 				</button>
 
 				{#if $user.role === 'admin'}
@@ -238,7 +241,7 @@
 								/>
 							</svg>
 						</div>
-						<div class=" self-center">Images</div>
+						<div class=" self-center">{$i18n.t('Images')}</div>
 					</button>
 				{/if}
 
@@ -265,7 +268,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">Chats</div>
+					<div class=" self-center">{$i18n.t('Chats')}</div>
 				</button>
 
 				<button
@@ -291,7 +294,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">Account</div>
+					<div class=" self-center">{$i18n.t('Account')}</div>
 				</button>
 
 				<button
@@ -317,16 +320,16 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">About</div>
+					<div class=" self-center">{$i18n.t('About')}</div>
 				</button>
 			</div>
-			<div class="flex-1 md:min-h-[380px]">
+			<div class="flex-1 md:min-h-[25rem]">
 				{#if selectedTab === 'general'}
 					<General
 						{getModels}
 						{saveSettings}
 						on:save={() => {
-							toast.success('Settings saved successfully!');
+							toast.success($i18n.t('Settings saved successfully!'));
 						}}
 					/>
 				{:else if selectedTab === 'models'}
@@ -335,28 +338,28 @@
 					<Connections
 						{getModels}
 						on:save={() => {
-							toast.success('Settings saved successfully!');
+							toast.success($i18n.t('Settings saved successfully!'));
 						}}
 					/>
 				{:else if selectedTab === 'interface'}
 					<Interface
 						{saveSettings}
 						on:save={() => {
-							toast.success('Settings saved successfully!');
+							toast.success($i18n.t('Settings saved successfully!'));
 						}}
 					/>
 				{:else if selectedTab === 'audio'}
 					<Audio
 						{saveSettings}
 						on:save={() => {
-							toast.success('Settings saved successfully!');
+							toast.success($i18n.t('Settings saved successfully!'));
 						}}
 					/>
 				{:else if selectedTab === 'images'}
 					<Images
 						{saveSettings}
 						on:save={() => {
-							toast.success('Settings saved successfully!');
+							toast.success($i18n.t('Settings saved successfully!'));
 						}}
 					/>
 				{:else if selectedTab === 'chats'}
@@ -364,7 +367,7 @@
 				{:else if selectedTab === 'account'}
 					<Account
 						saveHandler={() => {
-							toast.success('Settings saved successfully!');
+							toast.success($i18n.t('Settings saved successfully!'));
 						}}
 					/>
 				{:else if selectedTab === 'about'}

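Most of the remaining hunks in this commit follow the same pattern as SettingsModal.svelte above: pull the i18n store from Svelte context and replace hard-coded strings with $i18n.t(...), using i18next-style {{placeholder}} interpolation where values are embedded. A minimal sketch of that usage in script code rather than markup; the store typing and example values are assumptions:

import { getContext } from 'svelte';
import { get, type Readable } from 'svelte/store';

type I18nStore = Readable<{ t: (key: string, vars?: Record<string, unknown>) => string }>;

// Must be called during component initialization.
const i18n = getContext<I18nStore>('i18n');

// In markup: {$i18n.t('Save')}
// In script code, read the store value first:
const message = get(i18n).t(
	`Model '{{modelName}}' has been successfully downloaded.`,
	{ modelName: 'mistral:7b' }
);
// -> "Model 'mistral:7b' has been successfully downloaded."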
+ 6 - 3
src/lib/components/chat/ShareChatModal.svelte

@@ -1,6 +1,9 @@
 <script lang="ts">
+	import { getContext } from 'svelte';
 	import Modal from '../common/Modal.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let downloadChat: Function;
 	export let shareChat: Function;
 
@@ -17,11 +20,11 @@
 				show = false;
 			}}
 		>
-			Share to OpenWebUI Community
+			{$i18n.t('Share to OpenWebUI Community')}
 		</button>
 
 		<div class="flex justify-center space-x-1 mt-1.5">
-			<div class=" self-center text-gray-400 text-xs font-medium">or</div>
+			<div class=" self-center text-gray-400 text-xs font-medium">{$i18n.t('or')}</div>
 
 			<button
 				class=" self-center rounded-full text-xs font-medium text-gray-700 dark:text-gray-500 underline"
@@ -31,7 +34,7 @@
 					show = false;
 				}}
 			>
-				Download as a File
+				{$i18n.t('Download as a File')}
 			</button>
 		</div>
 	</div>

+ 12 - 9
src/lib/components/chat/ShortcutsModal.svelte

@@ -1,13 +1,16 @@
 <script lang="ts">
+	import { getContext } from 'svelte';
 	import Modal from '../common/Modal.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let show = false;
 </script>
 
 <Modal bind:show>
 	<div>
 		<div class=" flex justify-between dark:text-gray-300 px-5 py-4">
-			<div class=" text-lg font-medium self-center">Keyboard shortcuts</div>
+			<div class=" text-lg font-medium self-center">{$i18n.t('Keyboard shortcuts')}</div>
 			<button
 				class="self-center"
 				on:click={() => {
@@ -32,7 +35,7 @@
 			<div class=" flex flex-col w-full sm:flex-row sm:justify-center sm:space-x-6">
 				<div class="flex flex-col space-y-3 w-full self-start">
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Open new chat</div>
+						<div class=" text-sm">{$i18n.t('Open new chat')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div
@@ -56,7 +59,7 @@
 					</div>
 
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Focus chat input</div>
+						<div class=" text-sm">{$i18n.t('Focus chat input')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div
@@ -74,7 +77,7 @@
 					</div>
 
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Copy last code block</div>
+						<div class=" text-sm">{$i18n.t('Copy last code block')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div
@@ -98,7 +101,7 @@
 					</div>
 
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Copy last response</div>
+						<div class=" text-sm">{$i18n.t('Copy last response')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div
@@ -124,7 +127,7 @@
 
 				<div class="flex flex-col space-y-3 w-full self-start">
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Toggle settings</div>
+						<div class=" text-sm">{$i18n.t('Toggle settings')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div
@@ -141,7 +144,7 @@
 					</div>
 
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Toggle sidebar</div>
+						<div class=" text-sm">{$i18n.t('Toggle sidebar')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div
@@ -165,7 +168,7 @@
 					</div>
 
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Delete chat</div>
+						<div class=" text-sm">{$i18n.t('Delete chat')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div
@@ -188,7 +191,7 @@
 					</div>
 
 					<div class="w-full flex justify-between items-center">
-						<div class=" text-sm">Show shortcuts</div>
+						<div class=" text-sm">{$i18n.t('Show shortcuts')}</div>
 
 						<div class="flex space-x-1 text-xs">
 							<div

+ 20 - 0
src/lib/components/chat/TagChatModal.svelte

@@ -0,0 +1,20 @@
+<script lang="ts">
+	import { getContext } from 'svelte';
+	import Modal from '../common/Modal.svelte';
+
+	import Tags from '../common/Tags.svelte';
+
+	const i18n = getContext('i18n');
+
+	export let tags;
+	export let deleteTag: Function;
+	export let addTag: Function;
+
+	export let show = false;
+</script>
+
+<Modal bind:show size="xs">
+	<div class="px-4 pt-4 pb-5 w-full flex flex-col justify-center">
+		<Tags {tags} {deleteTag} {addTag} />
+	</div>
+</Modal>

+ 40 - 0
src/lib/components/common/Dropdown.svelte

@@ -0,0 +1,40 @@
+<script lang="ts">
+	import { DropdownMenu } from 'bits-ui';
+	import { createEventDispatcher } from 'svelte';
+
+	import { flyAndScale } from '$lib/utils/transitions';
+
+	const dispatch = createEventDispatcher();
+</script>
+
+<DropdownMenu.Root
+	onOpenChange={(state) => {
+		dispatch('change', state);
+	}}
+>
+	<DropdownMenu.Trigger>
+		<slot />
+	</DropdownMenu.Trigger>
+
+	<slot name="content">
+		<DropdownMenu.Content
+			class="w-full max-w-[130px] rounded-lg px-1 py-1.5 border border-gray-700 z-50 bg-gray-850 text-white"
+			sideOffset={8}
+			side="bottom"
+			align="start"
+			transition={flyAndScale}
+		>
+			<DropdownMenu.Item class="flex items-center px-3 py-2 text-sm  font-medium">
+				<div class="flex items-center">Profile</div>
+			</DropdownMenu.Item>
+
+			<DropdownMenu.Item class="flex items-center px-3 py-2 text-sm  font-medium">
+				<div class="flex items-center">Profile</div>
+			</DropdownMenu.Item>
+
+			<DropdownMenu.Item class="flex items-center px-3 py-2 text-sm  font-medium">
+				<div class="flex items-center">Profile</div>
+			</DropdownMenu.Item>
+		</DropdownMenu.Content>
+	</slot>
+</DropdownMenu.Root>

+ 17 - 4
src/lib/components/common/ImagePreview.svelte

@@ -2,6 +2,22 @@
 	export let show = false;
 	export let src = '';
 	export let alt = '';
+
+	const downloadImage = (url, filename) => {
+		fetch(url)
+			.then((response) => response.blob())
+			.then((blob) => {
+				const objectUrl = window.URL.createObjectURL(blob);
+				const link = document.createElement('a');
+				link.href = objectUrl;
+				link.download = filename;
+				document.body.appendChild(link);
+				link.click();
+				document.body.removeChild(link);
+				window.URL.revokeObjectURL(objectUrl);
+			})
+			.catch((error) => console.error('Error downloading image:', error));
+	};
 </script>
 
 {#if show}
@@ -35,10 +51,7 @@
 				<button
 					class=" p-5"
 					on:click={() => {
-						const a = document.createElement('a');
-						a.href = src;
-						a.download = 'Image.png';
-						a.click();
+						downloadImage(src, 'Image.png');
 					}}
 				>
 					<svg

+ 4 - 2
src/lib/components/common/Modal.svelte

@@ -2,6 +2,8 @@
 	import { onMount } from 'svelte';
 	import { fade } from 'svelte/transition';
 
+	import { flyAndScale } from '$lib/utils/transitions';
+
 	export let show = true;
 	export let size = 'md';
 
@@ -41,10 +43,10 @@
 		}}
 	>
 		<div
-			class=" modal-content m-auto rounded-2xl max-w-full {sizeToWidth(
+			class=" m-auto rounded-2xl max-w-full {sizeToWidth(
 				size
 			)} mx-2 bg-gray-50 dark:bg-gray-900 shadow-3xl"
-			in:fade={{ duration: 10 }}
+			in:flyAndScale
 			on:click={(e) => {
 				e.stopPropagation();
 			}}

+ 95 - 0
src/lib/components/common/Selector.svelte

@@ -0,0 +1,95 @@
+<script lang="ts">
+	import { Select } from 'bits-ui';
+
+	import { flyAndScale } from '$lib/utils/transitions';
+
+	import { createEventDispatcher } from 'svelte';
+	import ChevronDown from '../icons/ChevronDown.svelte';
+	import Check from '../icons/Check.svelte';
+	import Search from '../icons/Search.svelte';
+
+	const dispatch = createEventDispatcher();
+
+	export let value = '';
+	export let placeholder = 'Select a model';
+	export let searchEnabled = true;
+	export let searchPlaceholder = 'Search a model';
+
+	export let items = [
+		{ value: 'mango', label: 'Mango' },
+		{ value: 'watermelon', label: 'Watermelon' },
+		{ value: 'apple', label: 'Apple' },
+		{ value: 'pineapple', label: 'Pineapple' },
+		{ value: 'orange', label: 'Orange' }
+	];
+
+	let searchValue = '';
+
+	$: filteredItems = searchValue
+		? items.filter((item) => item.value.includes(searchValue.toLowerCase()))
+		: items;
+</script>
+
+<Select.Root
+	{items}
+	onOpenChange={() => {
+		searchValue = '';
+	}}
+	selected={items.find((item) => item.value === value)}
+	onSelectedChange={(selectedItem) => {
+		value = selectedItem.value;
+	}}
+>
+	<Select.Trigger class="relative w-full" aria-label={placeholder}>
+		<Select.Value
+			class="inline-flex h-input px-0.5 w-full outline-none bg-transparent truncate text-lg font-semibold placeholder-gray-400  focus:outline-none"
+			{placeholder}
+		/>
+		<ChevronDown className="absolute end-2 top-1/2 -translate-y-[45%] size-3.5" strokeWidth="2.5" />
+	</Select.Trigger>
+	<Select.Content
+		class="w-full rounded-lg  bg-white dark:bg-gray-900 dark:text-white shadow-lg border border-gray-300/30 dark:border-gray-700/50  outline-none"
+		transition={flyAndScale}
+		sideOffset={4}
+	>
+		<slot>
+			{#if searchEnabled}
+				<div class="flex items-center gap-2.5 px-5 mt-3.5 mb-3">
+					<Search className="size-4" strokeWidth="2.5" />
+
+					<input
+						bind:value={searchValue}
+						class="w-full text-sm bg-transparent outline-none"
+						placeholder={searchPlaceholder}
+					/>
+				</div>
+
+				<hr class="border-gray-100 dark:border-gray-800" />
+			{/if}
+
+			<div class="px-3 my-2 max-h-80 overflow-y-auto">
+				{#each filteredItems as item}
+					<Select.Item
+						class="flex w-full font-medium line-clamp-1 select-none items-center rounded-button py-2 pl-3 pr-1.5 text-sm  text-gray-700 dark:text-gray-100  outline-none transition-all duration-75 hover:bg-gray-100 dark:hover:bg-gray-850 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
+						value={item.value}
+						label={item.label}
+					>
+						{item.label}
+
+						{#if value === item.value}
+							<div class="ml-auto">
+								<Check />
+							</div>
+						{/if}
+					</Select.Item>
+				{:else}
+					<div>
+						<div class="block px-5 py-2 text-sm text-gray-700 dark:text-gray-100">
+							No results found
+						</div>
+					</div>
+				{/each}
+			</div>
+		</slot>
+	</Select.Content>
+</Select.Root>

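One detail worth noting in the Selector.svelte filter above: only the search query is lowercased, so item values containing uppercase characters will not match case-insensitively. A fully case-insensitive variant that also searches labels might look like the following; this is illustrative only and not part of the commit:

const filterItems = (items: { value: string; label: string }[], query: string) =>
	query === ''
		? items
		: items.filter((item) =>
				`${item.value} ${item.label}`.toLowerCase().includes(query.toLowerCase())
			);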
+ 2 - 1
src/lib/components/common/Tags.svelte

@@ -8,7 +8,7 @@
 	export let addTag: Function;
 </script>
 
-<div class="flex flex-row space-x-0.5 line-clamp-1">
+<div class="flex flex-row flex-wrap gap-0.5 line-clamp-1">
 	<TagList
 		{tags}
 		on:delete={(e) => {
@@ -17,6 +17,7 @@
 	/>
 
 	<TagInput
+		label={tags.length == 0 ? 'Add Tags' : ''}
 		on:add={(e) => {
 			addTag(e.detail);
 		}}

+ 38 - 17
src/lib/components/common/Tags/TagInput.svelte

@@ -1,28 +1,31 @@
 <script lang="ts">
-	import { createEventDispatcher } from 'svelte';
+	import { createEventDispatcher, getContext } from 'svelte';
+	import { tags } from '$lib/stores';
+	import { toast } from 'svelte-sonner';
 	const dispatch = createEventDispatcher();
 
+	const i18n = getContext('i18n');
+
+	export let label = '';
 	let showTagInput = false;
 	let tagName = '';
+
+	const addTagHandler = async () => {
+		tagName = tagName.trim();
+		if (tagName !== '') {
+			dispatch('add', tagName);
+			tagName = '';
+			showTagInput = false;
+		} else {
+			toast.error('Invalid Tag');
+		}
+	};
 </script>
 
 <div class="flex space-x-1 pl-1.5">
 	{#if showTagInput}
 		<div class="flex items-center">
-			<input
-				bind:value={tagName}
-				class=" cursor-pointer self-center text-xs h-fit bg-transparent outline-none line-clamp-1 w-[4rem]"
-				placeholder="Add a tag"
-			/>
-
-			<button
-				type="button"
-				on:click={() => {
-					dispatch('add', tagName);
-					tagName = '';
-					showTagInput = false;
-				}}
-			>
+			<button type="button" on:click={addTagHandler}>
 				<svg
 					xmlns="http://www.w3.org/2000/svg"
 					viewBox="0 0 16 16"
@@ -36,9 +39,23 @@
 					/>
 				</svg>
 			</button>
+			<input
+				bind:value={tagName}
+				class=" pl-2 cursor-pointer self-center text-xs h-fit bg-transparent outline-none line-clamp-1 w-[5.5rem]"
+				placeholder={$i18n.t('Add a tag')}
+				list="tagOptions"
+				on:keydown={(event) => {
+					if (event.key === 'Enter') {
+						addTagHandler();
+					}
+				}}
+			/>
+			<datalist id="tagOptions">
+				{#each $tags as tag}
+					<option value={tag.name} />
+				{/each}
+			</datalist>
 		</div>
-
-		<!-- TODO: Tag Suggestions -->
 	{/if}
 
 	<button
@@ -61,4 +78,8 @@
 			</svg>
 		</div>
 	</button>
+
+	{#if label && !showTagInput}
+		<span class="text-xs pl-1.5 self-center">{label}</span>
+	{/if}
 </div>
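The new datalist iterates the `$tags` store and reads `tag.name`, so the store imported from `$lib/stores` is presumably a writable array of objects with at least a `name` field. A sketch of that assumed shape — the real store definition is not part of this diff:

import { writable, type Writable } from 'svelte/store';

// Assumed shape, inferred only from the `tag.name` access in the datalist above.
type Tag = { name: string };

export const tags: Writable<Tag[]> = writable([]);

With that shape, previously used tag names surface as browser autocomplete suggestions, while the Enter handler still dispatches whatever the user typed.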

+ 1 - 1
src/lib/components/common/Tags/TagList.svelte

@@ -7,7 +7,7 @@
 
 {#each tags as tag}
 	<div
-		class="px-2 py-0.5 space-x-1 flex h-fit items-center rounded-full transition border dark:border-gray-600 dark:text-white"
+		class="px-2 py-0.5 space-x-1 flex h-fit items-center rounded-full transition border dark:border-gray-800 dark:text-white"
 	>
 		<div class=" text-[0.7rem] font-medium self-center line-clamp-1">
 			{tag.name}

+ 1 - 1
src/lib/components/common/Tooltip.svelte

@@ -29,6 +29,6 @@
 	});
 </script>
 
-<div bind:this={tooltipElement} aria-label={content}>
+<div bind:this={tooltipElement} aria-label={content} class="flex">
 	<slot />
 </div>

+ 9 - 7
src/lib/components/documents/AddDocModal.svelte

@@ -1,7 +1,7 @@
 <script lang="ts">
 	import { toast } from 'svelte-sonner';
 	import dayjs from 'dayjs';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 
 	import { createNewDoc, getDocs, tagDocByName, updateDocByName } from '$lib/apis/documents';
 	import Modal from '../common/Modal.svelte';
@@ -13,6 +13,8 @@
 	import { transformFileName } from '$lib/utils';
 	import { SUPPORTED_FILE_EXTENSIONS, SUPPORTED_FILE_TYPE } from '$lib/constants';
 
+	const i18n = getContext('i18n');
+
 	export let show = false;
 	export let selectedDoc;
 	let uploadDocInputElement: HTMLInputElement;
@@ -71,7 +73,7 @@
 			inputFiles = null;
 			uploadDocInputElement.value = '';
 		} else {
-			toast.error(`File not found.`);
+			toast.error($i18n.t(`File not found.`));
 		}
 
 		show = false;
@@ -96,7 +98,7 @@
 <Modal size="sm" bind:show>
 	<div>
 		<div class=" flex justify-between dark:text-gray-300 px-5 py-4">
-			<div class=" text-lg font-medium self-center">Add Docs</div>
+			<div class=" text-lg font-medium self-center">{$i18n.t('Add Docs')}</div>
 			<button
 				class="self-center"
 				on:click={() => {
@@ -136,7 +138,7 @@
 						/>
 
 						<button
-							class="w-full text-sm font-medium py-3 bg-gray-850 hover:bg-gray-800 text-center rounded-xl"
+							class="w-full text-sm font-medium py-3 bg-gray-100 hover:bg-gray-200 dark:bg-gray-850 dark:hover:bg-gray-800 text-center rounded-xl"
 							type="button"
 							on:click={() => {
 								uploadDocInputElement.click();
@@ -145,14 +147,14 @@
 							{#if inputFiles}
 								{inputFiles.length > 0 ? `${inputFiles.length}` : ''} document(s) selected.
 							{:else}
-								Click here to select documents.
+								{$i18n.t('Click here to select documents.')}
 							{/if}
 						</button>
 					</div>
 
 					<div class=" flex flex-col space-y-1.5">
 						<div class="flex flex-col w-full">
-							<div class=" mb-1.5 text-xs text-gray-500">Tags</div>
+							<div class=" mb-1.5 text-xs text-gray-500">{$i18n.t('Tags')}</div>
 
 							<Tags {tags} addTag={addTagHandler} deleteTag={deleteTagHandler} />
 						</div>
@@ -163,7 +165,7 @@
 							class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"
 							type="submit"
 						>
-							Save
+							{$i18n.t('Save')}
 						</button>
 					</div>
 				</form>
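Throughout these components the translator is pulled from Svelte context with `getContext('i18n')` and read as `$i18n.t(...)`, i.e. the context value is a store whose current value exposes a `t()` method (consistent with the `i18next-parser.config.ts` added in this merge). A type-level sketch of that assumption; the actual provider code is not shown here:

import type { Readable } from 'svelte/store';

// Assumed contract of the value registered under the 'i18n' context key,
// inferred from calls such as $i18n.t('Save') and
// $i18n.t('Scan for documents from {{path}}', { path: '/data/docs' }).
type I18nContext = Readable<{
	t: (key: string, options?: Record<string, unknown>) => string;
}>;

Components would then read it as `const i18n = getContext('i18n');` and rely on the `$i18n` auto-subscription, which is exactly the pattern visible in the hunks above and below.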

+ 8 - 6
src/lib/components/documents/EditDocModal.svelte

@@ -1,7 +1,7 @@
 <script lang="ts">
 	import { toast } from 'svelte-sonner';
 	import dayjs from 'dayjs';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 
 	import { getDocs, tagDocByName, updateDocByName } from '$lib/apis/documents';
 	import Modal from '../common/Modal.svelte';
@@ -10,6 +10,8 @@
 	import Tags from '../common/Tags.svelte';
 	import { addTagById } from '$lib/apis/chats';
 
+	const i18n = getContext('i18n');
+
 	export let show = false;
 	export let selectedDoc;
 
@@ -74,7 +76,7 @@
 <Modal size="sm" bind:show>
 	<div>
 		<div class=" flex justify-between dark:text-gray-300 px-5 py-4">
-			<div class=" text-lg font-medium self-center">Edit Doc</div>
+			<div class=" text-lg font-medium self-center">{$i18n.t('Edit Doc')}</div>
 			<button
 				class="self-center"
 				on:click={() => {
@@ -105,7 +107,7 @@
 				>
 					<div class=" flex flex-col space-y-1.5">
 						<div class="flex flex-col w-full">
-							<div class=" mb-1 text-xs text-gray-500">Name Tag</div>
+							<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Name Tag')}</div>
 
 							<div class="flex flex-1">
 								<div
@@ -134,7 +136,7 @@
 						</div>
 
 						<div class="flex flex-col w-full">
-							<div class=" mb-1 text-xs text-gray-500">Title</div>
+							<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Title')}</div>
 
 							<div class="flex-1">
 								<input
@@ -148,7 +150,7 @@
 						</div>
 
 						<div class="flex flex-col w-full">
-							<div class=" mb-1.5 text-xs text-gray-500">Tags</div>
+							<div class=" mb-1.5 text-xs text-gray-500">{$i18n.t('Tags')}</div>
 
 							<Tags {tags} addTag={addTagHandler} deleteTag={deleteTagHandler} />
 						</div>
@@ -159,7 +161,7 @@
 							class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"
 							type="submit"
 						>
-							Save
+							{$i18n.t('Save')}
 						</button>
 					</div>
 				</form>

+ 170 - 70
src/lib/components/documents/Settings/General.svelte

@@ -5,16 +5,22 @@
 		updateRAGConfig,
 		getQuerySettings,
 		scanDocs,
-		updateQuerySettings
+		updateQuerySettings,
+		resetVectorDB
 	} from '$lib/apis/rag';
+
 	import { documents } from '$lib/stores';
-	import { onMount } from 'svelte';
+	import { onMount, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 
+	const i18n = getContext('i18n');
+
 	export let saveHandler: Function;
 
 	let loading = false;
 
+	let showResetConfirm = false;
+
 	let chunkSize = 0;
 	let chunkOverlap = 0;
 	let pdfExtractImages = true;
@@ -31,7 +37,7 @@
 
 		if (res) {
 			await documents.set(await getDocs(localStorage.token));
-			toast.success('Scan complete!');
+			toast.success($i18n.t('Scan complete!'));
 		}
 	};
 
@@ -69,10 +75,12 @@
 >
 	<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
 		<div>
-			<div class=" mb-2 text-sm font-medium">General Settings</div>
+			<div class=" mb-2 text-sm font-medium">{$i18n.t('General Settings')}</div>
 
 			<div class="  flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">Scan for documents from '/data/docs'</div>
+				<div class=" self-center text-xs font-medium">
+					{$i18n.t('Scan for documents from {{path}}', { path: '/data/docs' })}
+				</div>
 
 				<button
 					class=" self-center text-xs p-1 px-3 bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 rounded flex flex-row space-x-1 items-center {loading
@@ -85,7 +93,7 @@
 					type="button"
 					disabled={loading}
 				>
-					<div class="self-center font-medium">Scan</div>
+					<div class="self-center font-medium">{$i18n.t('Scan')}</div>
 
 					<!-- <svg
 						xmlns="http://www.w3.org/2000/svg"
@@ -133,77 +141,76 @@
 
 		<hr class=" dark:border-gray-700" />
 
-		<div class=" space-y-3">
-			<div class=" space-y-3">
-				<div class=" text-sm font-medium">Chunk Params</div>
+		<div class=" ">
+			<div class=" text-sm font-medium">{$i18n.t('Chunk Params')}</div>
 
-				<div class=" flex gap-2">
-					<div class="  flex w-full justify-between gap-2">
-						<div class="self-center text-xs font-medium min-w-fit">Chunk Size</div>
+			<div class=" flex">
+				<div class="  flex w-full justify-between">
+					<div class="self-center text-xs font-medium min-w-fit">{$i18n.t('Chunk Size')}</div>
 
-						<div class="self-center">
-							<input
-								class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
-								type="number"
-								placeholder="Enter Chunk Size"
-								bind:value={chunkSize}
-								autocomplete="off"
-								min="0"
-							/>
-						</div>
+					<div class="self-center p-3">
+						<input
+							class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
+							type="number"
+							placeholder={$i18n.t('Enter Chunk Size')}
+							bind:value={chunkSize}
+							autocomplete="off"
+							min="0"
+						/>
 					</div>
+				</div>
 
-					<div class="flex w-full gap-2">
-						<div class=" self-center text-xs font-medium min-w-fit">Chunk Overlap</div>
+				<div class="flex w-full">
+					<div class=" self-center text-xs font-medium min-w-fit">{$i18n.t('Chunk Overlap')}</div>
 
-						<div class="self-center">
-							<input
-								class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
-								type="number"
-								placeholder="Enter Chunk Overlap"
-								bind:value={chunkOverlap}
-								autocomplete="off"
-								min="0"
-							/>
-						</div>
+					<div class="self-center p-3">
+						<input
+							class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
+							type="number"
+							placeholder={$i18n.t('Enter Chunk Overlap')}
+							bind:value={chunkOverlap}
+							autocomplete="off"
+							min="0"
+						/>
 					</div>
 				</div>
+			</div>
 
-				<div>
-					<div class="flex justify-between items-center text-xs">
-						<div class=" text-xs font-medium">PDF Extract Images (OCR)</div>
+			<div>
+				<div class="flex justify-between items-center text-xs">
+					<div class=" text-xs font-medium">{$i18n.t('PDF Extract Images (OCR)')}</div>
 
-						<button
-							class=" text-xs font-medium text-gray-500"
-							type="button"
-							on:click={() => {
-								pdfExtractImages = !pdfExtractImages;
-							}}>{pdfExtractImages ? 'On' : 'Off'}</button
-						>
-					</div>
+					<button
+						class=" text-xs font-medium text-gray-500"
+						type="button"
+						on:click={() => {
+							pdfExtractImages = !pdfExtractImages;
+						}}>{pdfExtractImages ? $i18n.t('On') : $i18n.t('Off')}</button
+					>
 				</div>
 			</div>
+		</div>
 
-			<div>
-				<div class=" text-sm font-medium">Query Params</div>
+		<div>
+			<div class=" text-sm font-medium">{$i18n.t('Query Params')}</div>
 
-				<div class=" flex py-2">
-					<div class="  flex w-full justify-between gap-2">
-						<div class="self-center text-xs font-medium flex-1">Top K</div>
+			<div class=" flex">
+				<div class="  flex w-full justify-between">
+					<div class="self-center text-xs font-medium flex-1">{$i18n.t('Top K')}</div>
 
-						<div class="self-center">
-							<input
-								class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
-								type="number"
-								placeholder="Enter Top K"
-								bind:value={querySettings.k}
-								autocomplete="off"
-								min="0"
-							/>
-						</div>
+					<div class="self-center p-3">
+						<input
+							class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
+							type="number"
+							placeholder={$i18n.t('Enter Top K')}
+							bind:value={querySettings.k}
+							autocomplete="off"
+							min="0"
+						/>
 					</div>
+				</div>
 
-					<!-- <div class="flex w-full">
+				<!-- <div class="flex w-full">
 						<div class=" self-center text-xs font-medium min-w-fit">Chunk Overlap</div>
 	
 						<div class="self-center p-3">
@@ -217,18 +224,111 @@
 							/>
 						</div>
 					</div> -->
+			</div>
+
+			<div>
+				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('RAG Template')}</div>
+				<textarea
+					bind:value={querySettings.template}
+					class="w-full rounded p-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none resize-none"
+					rows="4"
+				/>
+			</div>
+		</div>
+
+		<hr class=" dark:border-gray-700" />
+
+		{#if showResetConfirm}
+			<div class="flex justify-between rounded-md items-center py-2 px-3.5 w-full transition">
+				<div class="flex items-center space-x-3">
+					<svg
+						xmlns="http://www.w3.org/2000/svg"
+						viewBox="0 0 16 16"
+						fill="currentColor"
+						class="w-4 h-4"
+					>
+						<path d="M2 3a1 1 0 0 1 1-1h10a1 1 0 0 1 1 1v1a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3Z" />
+						<path
+							fill-rule="evenodd"
+							d="M13 6H3v6a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V6ZM5.72 7.47a.75.75 0 0 1 1.06 0L8 8.69l1.22-1.22a.75.75 0 1 1 1.06 1.06L9.06 9.75l1.22 1.22a.75.75 0 1 1-1.06 1.06L8 10.81l-1.22 1.22a.75.75 0 0 1-1.06-1.06l1.22-1.22-1.22-1.22a.75.75 0 0 1 0-1.06Z"
+							clip-rule="evenodd"
+						/>
+					</svg>
+					<span>{$i18n.t('Are you sure?')}</span>
 				</div>
 
-				<div>
-					<div class=" mb-2.5 text-sm font-medium">RAG Template</div>
-					<textarea
-						bind:value={querySettings.template}
-						class="w-full rounded p-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none resize-none"
-						rows="4"
-					/>
+				<div class="flex space-x-1.5 items-center">
+					<button
+						class="hover:text-white transition"
+						on:click={() => {
+							const res = resetVectorDB(localStorage.token).catch((error) => {
+								toast.error(error);
+								return null;
+							});
+
+							if (res) {
+								toast.success($i18n.t('Success'));
+							}
+
+							showResetConfirm = false;
+						}}
+					>
+						<svg
+							xmlns="http://www.w3.org/2000/svg"
+							viewBox="0 0 20 20"
+							fill="currentColor"
+							class="w-4 h-4"
+						>
+							<path
+								fill-rule="evenodd"
+								d="M16.704 4.153a.75.75 0 01.143 1.052l-8 10.5a.75.75 0 01-1.127.075l-4.5-4.5a.75.75 0 011.06-1.06l3.894 3.893 7.48-9.817a.75.75 0 011.05-.143z"
+								clip-rule="evenodd"
+							/>
+						</svg>
+					</button>
+					<button
+						class="hover:text-white transition"
+						on:click={() => {
+							showResetConfirm = false;
+						}}
+					>
+						<svg
+							xmlns="http://www.w3.org/2000/svg"
+							viewBox="0 0 20 20"
+							fill="currentColor"
+							class="w-4 h-4"
+						>
+							<path
+								d="M6.28 5.22a.75.75 0 00-1.06 1.06L8.94 10l-3.72 3.72a.75.75 0 101.06 1.06L10 11.06l3.72 3.72a.75.75 0 101.06-1.06L11.06 10l3.72-3.72a.75.75 0 00-1.06-1.06L10 8.94 6.28 5.22z"
+							/>
+						</svg>
+					</button>
 				</div>
 			</div>
-		</div>
+		{:else}
+			<button
+				class=" flex rounded-md py-2 px-3.5 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
+				on:click={() => {
+					showResetConfirm = true;
+				}}
+			>
+				<div class=" self-center mr-3">
+					<svg
+						xmlns="http://www.w3.org/2000/svg"
+						viewBox="0 0 16 16"
+						fill="currentColor"
+						class="w-4 h-4"
+					>
+						<path
+							fill-rule="evenodd"
+							d="M3.5 2A1.5 1.5 0 0 0 2 3.5v9A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 12.5 4H9.621a1.5 1.5 0 0 1-1.06-.44L7.439 2.44A1.5 1.5 0 0 0 6.38 2H3.5Zm6.75 7.75a.75.75 0 0 0 0-1.5h-4.5a.75.75 0 0 0 0 1.5h4.5Z"
+							clip-rule="evenodd"
+						/>
+					</svg>
+				</div>
+				<div class=" self-center text-sm font-medium">{$i18n.t('Reset Vector Storage')}</div>
+			</button>
+		{/if}
 	</div>
 
 	<div class="flex justify-end pt-3 text-sm font-medium">
@@ -236,7 +336,7 @@
 			class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"
 			type="submit"
 		>
-			Save
+			{$i18n.t('Save')}
 		</button>
 	</div>
 </form>
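The confirm button above calls `resetVectorDB(localStorage.token)` from `$lib/apis/rag`, a helper that is not included in this diff. A hypothetical sketch of such a token-authenticated client call, assuming a `RAG_API_BASE_URL` constant and a `/reset` route — both names are assumptions; the real endpoint lives in `backend/apps/rag/main.py`:

import { RAG_API_BASE_URL } from '$lib/constants'; // assumed constant, mirroring the other $lib/apis modules

// Hypothetical implementation of the helper imported above; the route and
// response format are guesses, not taken from this diff.
export const resetVectorDB = async (token: string) => {
	const res = await fetch(`${RAG_API_BASE_URL}/reset`, {
		method: 'GET',
		headers: {
			Accept: 'application/json',
			authorization: `Bearer ${token}`
		}
	});

	if (!res.ok) {
		throw await res.json(); // surface the backend error body to the caller's .catch()
	}

	return res.json();
};

Rejecting with the parsed body is what would let the `.catch((error) => { toast.error(error); ... })` in the hunk above display the backend's message.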

+ 5 - 2
src/lib/components/documents/SettingsModal.svelte

@@ -1,7 +1,10 @@
 <script>
+	import { getContext } from 'svelte';
 	import Modal from '../common/Modal.svelte';
 	import General from './Settings/General.svelte';
 
+	const i18n = getContext('i18n');
+
 	export let show = false;
 
 	let selectedTab = 'general';
@@ -10,7 +13,7 @@
 <Modal bind:show>
 	<div>
 		<div class=" flex justify-between dark:text-gray-300 px-5 py-4">
-			<div class=" text-lg font-medium self-center">Document Settings</div>
+			<div class=" text-lg font-medium self-center">{$i18n.t('Document Settings')}</div>
 			<button
 				class="self-center"
 				on:click={() => {
@@ -58,7 +61,7 @@
 							/>
 						</svg>
 					</div>
-					<div class=" self-center">General</div>
+					<div class=" self-center">{$i18n.t('General')}</div>
 				</button>
 			</div>
 			<div class="flex-1 md:min-h-[380px]">

+ 15 - 0
src/lib/components/icons/Check.svelte

@@ -0,0 +1,15 @@
+<script lang="ts">
+	export let className = 'w-4 h-4';
+	export let strokeWidth = '1.5';
+</script>
+
+<svg
+	xmlns="http://www.w3.org/2000/svg"
+	fill="none"
+	viewBox="0 0 24 24"
+	stroke-width={strokeWidth}
+	stroke="currentColor"
+	class={className}
+>
+	<path stroke-linecap="round" stroke-linejoin="round" d="m4.5 12.75 6 6 9-13.5" />
+</svg>
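The new icon accepts the two exported props declared above; a short usage sketch matching how the Selector hunk earlier consumes it (the class names here are illustrative):

<script lang="ts">
	import Check from '$lib/components/icons/Check.svelte';
</script>

<!-- Defaults are className="w-4 h-4" and strokeWidth="1.5"; both can be overridden. -->
<Check className="size-3.5" strokeWidth="2" />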

Some files were not shown because too many files changed in this diff