
Merge branch 'dev' into k_reranker

Timothy Jaeryang Baek 4 months ago
parent
commit
9d834a8e90
100 changed files with 2439 additions and 658 deletions
  1. 5 3
      .github/ISSUE_TEMPLATE/bug_report.yaml
  2. 1 0
      .github/ISSUE_TEMPLATE/config.yml
  3. 3 3
      .github/pull_request_template.md
  4. 1 1
      Dockerfile
  5. 55 4
      backend/open_webui/config.py
  6. 27 15
      backend/open_webui/env.py
  7. 3 0
      backend/open_webui/functions.py
  8. 40 10
      backend/open_webui/main.py
  9. 53 1
      backend/open_webui/retrieval/loaders/main.py
  10. 93 0
      backend/open_webui/retrieval/loaders/tavily.py
  11. 21 12
      backend/open_webui/retrieval/utils.py
  12. 13 6
      backend/open_webui/retrieval/vector/dbs/chroma.py
  13. 110 65
      backend/open_webui/retrieval/vector/dbs/opensearch.py
  14. 172 76
      backend/open_webui/retrieval/web/utils.py
  15. 3 1
      backend/open_webui/routers/audio.py
  16. 1 1
      backend/open_webui/routers/auths.py
  17. 103 0
      backend/open_webui/routers/chats.py
  18. 1 1
      backend/open_webui/routers/files.py
  19. 2 4
      backend/open_webui/routers/images.py
  20. 17 7
      backend/open_webui/routers/knowledge.py
  21. 7 2
      backend/open_webui/routers/ollama.py
  22. 32 13
      backend/open_webui/routers/openai.py
  23. 2 2
      backend/open_webui/routers/pipelines.py
  24. 7 0
      backend/open_webui/routers/retrieval.py
  25. 13 5
      backend/open_webui/routers/users.py
  26. 45 36
      backend/open_webui/socket/main.py
  27. 1 0
      backend/open_webui/utils/filter.py
  28. 76 25
      backend/open_webui/utils/middleware.py
  29. 1 0
      backend/open_webui/utils/models.py
  30. 8 5
      backend/open_webui/utils/oauth.py
  31. 10 0
      backend/open_webui/utils/payload.py
  32. 10 6
      backend/open_webui/utils/plugin.py
  33. 0 0
      backend/open_webui/utils/telemetry/__init__.py
  34. 26 0
      backend/open_webui/utils/telemetry/constants.py
  35. 31 0
      backend/open_webui/utils/telemetry/exporters.py
  36. 202 0
      backend/open_webui/utils/telemetry/instrumentors.py
  37. 23 0
      backend/open_webui/utils/telemetry/setup.py
  38. 3 0
      backend/open_webui/utils/tools.py
  39. 16 2
      backend/requirements.txt
  40. 2 1
      backend/start_windows.bat
  41. 18 8
      package-lock.json
  42. 2 1
      package.json
  43. 2 1
      pyproject.toml
  44. 1 1
      src/app.css
  45. 7 0
      src/lib/apis/index.ts
  46. 32 4
      src/lib/components/AddConnectionModal.svelte
  47. 13 1
      src/lib/components/admin/Settings/Connections/OllamaConnection.svelte
  48. 12 1
      src/lib/components/admin/Settings/Connections/OpenAIConnection.svelte
  49. 31 4
      src/lib/components/admin/Settings/Documents.svelte
  50. 11 2
      src/lib/components/admin/Settings/Evaluations/ArenaModelModal.svelte
  51. 9 5
      src/lib/components/admin/Settings/Images.svelte
  52. 2 1
      src/lib/components/admin/Settings/Models/ModelList.svelte
  53. 6 2
      src/lib/components/admin/Settings/WebSearch.svelte
  54. 14 3
      src/lib/components/admin/Users/Groups/EditGroupModal.svelte
  55. 15 1
      src/lib/components/admin/Users/UserList/UserChatsModal.svelte
  56. 7 4
      src/lib/components/channel/Messages.svelte
  57. 18 16
      src/lib/components/chat/Chat.svelte
  58. 36 40
      src/lib/components/chat/Controls/Controls.svelte
  59. 1 1
      src/lib/components/chat/MessageInput/Commands/Knowledge.svelte
  60. 6 1
      src/lib/components/chat/MessageInput/Commands/Prompts.svelte
  61. 6 6
      src/lib/components/chat/Messages/Citations.svelte
  62. 3 3
      src/lib/components/chat/Messages/CitationsModal.svelte
  63. 4 2
      src/lib/components/chat/Messages/CodeBlock.svelte
  64. 108 0
      src/lib/components/chat/Messages/Markdown/AlertRenderer.svelte
  65. 15 5
      src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte
  66. 5 3
      src/lib/components/chat/Messages/ResponseMessage.svelte
  67. 1 1
      src/lib/components/chat/Messages/UserMessage.svelte
  68. 146 40
      src/lib/components/chat/ModelSelector/Selector.svelte
  69. 13 29
      src/lib/components/chat/Navbar.svelte
  70. 1 0
      src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte
  71. 13 1
      src/lib/components/chat/Settings/Connections/Connection.svelte
  72. 56 0
      src/lib/components/chat/Settings/Interface.svelte
  73. 0 1
      src/lib/components/chat/SettingsModal.svelte
  74. 4 3
      src/lib/components/chat/Suggestions.svelte
  75. 7 1
      src/lib/components/common/Checkbox.svelte
  76. 38 4
      src/lib/components/common/CodeEditor.svelte
  77. 7 3
      src/lib/components/common/FileItem.svelte
  78. 6 2
      src/lib/components/common/FileItemModal.svelte
  79. 36 4
      src/lib/components/common/ImagePreview.svelte
  80. 12 1
      src/lib/components/common/Valves.svelte
  81. 73 20
      src/lib/components/layout/Navbar/Menu.svelte
  82. 8 0
      src/lib/components/layout/Sidebar.svelte
  83. 14 0
      src/lib/components/layout/Sidebar/ChatItem.svelte
  84. 73 25
      src/lib/components/layout/Sidebar/ChatMenu.svelte
  85. 20 7
      src/lib/components/layout/Sidebar/RecursiveFolder.svelte
  86. 19 1
      src/lib/components/layout/Sidebar/SearchInput.svelte
  87. 18 2
      src/lib/components/workspace/Knowledge/KnowledgeBase.svelte
  88. 10 3
      src/lib/components/workspace/Models/FiltersSelector.svelte
  89. 2 2
      src/lib/components/workspace/common/AccessControl.svelte
  90. 15 1
      src/lib/i18n/locales/ar-BH/translation.json
  91. 15 1
      src/lib/i18n/locales/bg-BG/translation.json
  92. 15 1
      src/lib/i18n/locales/bn-BD/translation.json
  93. 16 2
      src/lib/i18n/locales/ca-ES/translation.json
  94. 15 1
      src/lib/i18n/locales/ceb-PH/translation.json
  95. 15 1
      src/lib/i18n/locales/cs-CZ/translation.json
  96. 15 1
      src/lib/i18n/locales/da-DK/translation.json
  97. 88 74
      src/lib/i18n/locales/de-DE/translation.json
  98. 15 1
      src/lib/i18n/locales/dg-DG/translation.json
  99. 15 1
      src/lib/i18n/locales/el-GR/translation.json
  100. 15 1
      src/lib/i18n/locales/en-GB/translation.json

+ 5 - 3
.github/ISSUE_TEMPLATE/bug_report.yaml

@@ -27,6 +27,8 @@ body:
      options:
        - label: I have searched the existing issues and discussions.
          required: true
+        - label: I am using the latest version of Open WebUI.
+          required: true

  - type: dropdown
    id: installation-method
@@ -83,9 +85,9 @@ body:
          required: true
        - label: I am using the latest version of **both** Open WebUI and Ollama.
          required: true
-        - label: I have checked the browser console logs.
+        - label: I have included the browser console logs.
          required: true
-        - label: I have checked the Docker container logs.
+        - label: I have included the Docker container logs.
          required: true
        - label: I have listed steps to reproduce the bug in detail.
          required: true
@@ -110,7 +112,7 @@ body:
    id: reproduction-steps
    attributes:
      label: Steps to Reproduce
-      description: Provide step-by-step instructions to reproduce the issue.
+      description: Providing clear, step-by-step instructions helps us reproduce and fix the issue faster. If we can't reproduce it, we can't fix it.
      placeholder: |
        1. Go to '...'
        2. Click on '...'

+ 1 - 0
.github/ISSUE_TEMPLATE/config.yml

@@ -0,0 +1 @@
+blank_issues_enabled: false

+ 3 - 3
.github/pull_request_template.md

@@ -9,9 +9,9 @@
 - [ ] **Changelog:** Ensure a changelog entry following the format of [Keep a Changelog](https://keepachangelog.com/) is added at the bottom of the PR description.
 - [ ] **Documentation:** Have you updated relevant documentation [Open WebUI Docs](https://github.com/open-webui/docs), or other documentation sources?
 - [ ] **Dependencies:** Are there any new dependencies? Have you updated the dependency versions in the documentation?
-- [ ] **Testing:** Have you written and run sufficient tests for validating the changes?
+- [ ] **Testing:** Have you written and run sufficient tests to validate the changes?
 - [ ] **Code review:** Have you performed a self-review of your code, addressing any coding standard issues and ensuring adherence to the project's coding standards?
-- [ ] **Prefix:** To cleary categorize this pull request, prefix the pull request title, using one of the following:
+- [ ] **Prefix:** To clearly categorize this pull request, prefix the pull request title using one of the following:
  - **BREAKING CHANGE**: Significant changes that may affect compatibility
  - **build**: Changes that affect the build system or external dependencies
  - **ci**: Changes to our continuous integration processes or workflows
@@ -22,7 +22,7 @@
  - **i18n**: Internationalization or localization changes
  - **perf**: Performance improvement
  - **refactor**: Code restructuring for better maintainability, readability, or scalability
-  - **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc.)
+  - **style**: Changes that do not affect the meaning of the code (white space, formatting, missing semi-colons, etc.)
  - **test**: Adding missing tests or correcting existing tests
  - **WIP**: Work in progress, a temporary label for incomplete or ongoing work


+ 1 - 1
Dockerfile

@@ -132,7 +132,7 @@ RUN if [ "$USE_OLLAMA" = "true" ]; then \
 # install python dependencies
 COPY --chown=$UID:$GID ./backend/requirements.txt ./requirements.txt

-RUN pip3 install uv && \
+RUN pip3 install --no-cache-dir uv && \
     if [ "$USE_CUDA" = "true" ]; then \
     # If you use CUDA the whisper and embedding model will be downloaded on first use
     pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \

+ 55 - 4
backend/open_webui/config.py

@@ -3,6 +3,7 @@ import logging
 import os
 import shutil
 import base64
+import redis

 from datetime import datetime
 from pathlib import Path
@@ -17,6 +18,7 @@ from open_webui.env import (
     DATA_DIR,
     DATABASE_URL,
     ENV,
+    REDIS_URL,
     FRONTEND_BUILD_DIR,
     OFFLINE_MODE,
     OPEN_WEBUI_DIR,
@@ -248,9 +250,14 @@ class PersistentConfig(Generic[T]):

 class AppConfig:
     _state: dict[str, PersistentConfig]
+    _redis: Optional[redis.Redis] = None

-    def __init__(self):
+    def __init__(self, redis_url: Optional[str] = None):
         super().__setattr__("_state", {})
+        if redis_url:
+            super().__setattr__(
+                "_redis", redis.Redis.from_url(redis_url, decode_responses=True)
+            )

     def __setattr__(self, key, value):
         if isinstance(value, PersistentConfig):
@@ -259,7 +266,31 @@ class AppConfig:
             self._state[key].value = value
             self._state[key].save()

+            if self._redis:
+                redis_key = f"open-webui:config:{key}"
+                self._redis.set(redis_key, json.dumps(self._state[key].value))
+
     def __getattr__(self, key):
+        if key not in self._state:
+            raise AttributeError(f"Config key '{key}' not found")
+
+        # If Redis is available, check for an updated value
+        if self._redis:
+            redis_key = f"open-webui:config:{key}"
+            redis_value = self._redis.get(redis_key)
+
+            if redis_value is not None:
+                try:
+                    decoded_value = json.loads(redis_value)
+
+                    # Update the in-memory value if different
+                    if self._state[key].value != decoded_value:
+                        self._state[key].value = decoded_value
+                        log.info(f"Updated {key} from Redis: {decoded_value}")
+
+                except json.JSONDecodeError:
+                    log.error(f"Invalid JSON format in Redis for {key}: {redis_value}")
+
         return self._state[key].value


@@ -1276,7 +1307,7 @@ Strictly return in JSON format:
 ENABLE_AUTOCOMPLETE_GENERATION = PersistentConfig(
     "ENABLE_AUTOCOMPLETE_GENERATION",
     "task.autocomplete.enable",
-    os.environ.get("ENABLE_AUTOCOMPLETE_GENERATION", "True").lower() == "true",
+    os.environ.get("ENABLE_AUTOCOMPLETE_GENERATION", "False").lower() == "true",
 )

 AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH = PersistentConfig(
@@ -1548,8 +1579,10 @@ QDRANT_API_KEY = os.environ.get("QDRANT_API_KEY", None)

 # OpenSearch
 OPENSEARCH_URI = os.environ.get("OPENSEARCH_URI", "https://localhost:9200")
-OPENSEARCH_SSL = os.environ.get("OPENSEARCH_SSL", True)
-OPENSEARCH_CERT_VERIFY = os.environ.get("OPENSEARCH_CERT_VERIFY", False)
+OPENSEARCH_SSL = os.environ.get("OPENSEARCH_SSL", "true").lower() == "true"
+OPENSEARCH_CERT_VERIFY = (
+    os.environ.get("OPENSEARCH_CERT_VERIFY", "false").lower() == "true"
+)
 OPENSEARCH_USERNAME = os.environ.get("OPENSEARCH_USERNAME", None)
 OPENSEARCH_PASSWORD = os.environ.get("OPENSEARCH_PASSWORD", None)

@@ -1623,6 +1656,12 @@ TIKA_SERVER_URL = PersistentConfig(
     os.getenv("TIKA_SERVER_URL", "http://tika:9998"),  # Default for sidecar deployment
 )

+DOCLING_SERVER_URL = PersistentConfig(
+    "DOCLING_SERVER_URL",
+    "rag.docling_server_url",
+    os.getenv("DOCLING_SERVER_URL", "http://docling:5001"),
+)
+
 DOCUMENT_INTELLIGENCE_ENDPOINT = PersistentConfig(
     "DOCUMENT_INTELLIGENCE_ENDPOINT",
     "rag.document_intelligence_endpoint",
@@ -1955,6 +1994,12 @@ TAVILY_API_KEY = PersistentConfig(
     os.getenv("TAVILY_API_KEY", ""),
 )

+TAVILY_EXTRACT_DEPTH = PersistentConfig(
+    "TAVILY_EXTRACT_DEPTH",
+    "rag.web.search.tavily_extract_depth",
+    os.getenv("TAVILY_EXTRACT_DEPTH", "basic"),
+)
+
 JINA_API_KEY = PersistentConfig(
     "JINA_API_KEY",
     "rag.web.search.jina_api_key",
@@ -2041,6 +2086,12 @@ PLAYWRIGHT_WS_URI = PersistentConfig(
     os.environ.get("PLAYWRIGHT_WS_URI", None),
 )

+PLAYWRIGHT_TIMEOUT = PersistentConfig(
+    "PLAYWRIGHT_TIMEOUT",
+    "rag.web.loader.engine.playwright.timeout",
+    int(os.environ.get("PLAYWRIGHT_TIMEOUT", "10")),
+)
+
 FIRECRAWL_API_KEY = PersistentConfig(
     "FIRECRAWL_API_KEY",
     "firecrawl.api_key",

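Illustrative sketch (not part of the commit): the AppConfig change above mirrors every config write to Redis under an "open-webui:config:<KEY>" key and checks Redis again on read, so multiple workers sharing one Redis see the same values. A minimal standalone version of that write-through/read-through pattern, assuming a reachable Redis at a placeholder URL:

import json
import redis

r = redis.Redis.from_url("redis://localhost:6379/0", decode_responses=True)  # placeholder URL

def save_config(key: str, value) -> None:
    # Write-through: publish the value where other workers can see it.
    r.set(f"open-webui:config:{key}", json.dumps(value))

def load_config(key: str, current_value):
    # Read-through: prefer a newer shared value, fall back to the local one.
    raw = r.get(f"open-webui:config:{key}")
    if raw is None:
        return current_value
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return current_value  # ignore malformed entries, as the diff's __getattr__ does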
+ 27 - 15
backend/open_webui/env.py

@@ -105,7 +105,6 @@ for source in log_sources:
 
 
 log.setLevel(SRC_LOG_LEVELS["CONFIG"])

-
 WEBUI_NAME = os.environ.get("WEBUI_NAME", "Open WebUI")
 if WEBUI_NAME != "Open WebUI":
     WEBUI_NAME += " (Open WebUI)"
@@ -130,7 +129,6 @@ else:
     except Exception:
         PACKAGE_DATA = {"version": "0.0.0"}

-
 VERSION = PACKAGE_DATA["version"]


@@ -161,7 +159,6 @@ try:
 except Exception:
     changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()

-
 # Convert markdown content to HTML
 html_content = markdown.markdown(changelog_content)

@@ -192,7 +189,6 @@ for version in soup.find_all("h2"):

     changelog_json[version_number] = version_data

-
 CHANGELOG = changelog_json

 ####################################
@@ -209,7 +205,6 @@ ENABLE_FORWARD_USER_INFO_HEADERS = (
     os.environ.get("ENABLE_FORWARD_USER_INFO_HEADERS", "False").lower() == "true"
 )

-
 ####################################
 # WEBUI_BUILD_HASH
 ####################################
@@ -244,7 +239,6 @@ if FROM_INIT_PY:

     DATA_DIR = Path(os.getenv("DATA_DIR", OPEN_WEBUI_DIR / "data"))

-
 STATIC_DIR = Path(os.getenv("STATIC_DIR", OPEN_WEBUI_DIR / "static"))

 FONTS_DIR = Path(os.getenv("FONTS_DIR", OPEN_WEBUI_DIR / "static" / "fonts"))
@@ -256,7 +250,6 @@ if FROM_INIT_PY:
         os.getenv("FRONTEND_BUILD_DIR", OPEN_WEBUI_DIR / "frontend")
     ).resolve()

-
 ####################################
 # Database
 ####################################
@@ -321,7 +314,6 @@ RESET_CONFIG_ON_START = (
     os.environ.get("RESET_CONFIG_ON_START", "False").lower() == "true"
 )

-
 ENABLE_REALTIME_CHAT_SAVE = (
     os.environ.get("ENABLE_REALTIME_CHAT_SAVE", "False").lower() == "true"
 )
@@ -330,7 +322,7 @@ ENABLE_REALTIME_CHAT_SAVE = (
 # REDIS
 ####################################

-REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
+REDIS_URL = os.environ.get("REDIS_URL", "")

 ####################################
 # WEBUI_AUTH (Required for security)
@@ -399,18 +391,16 @@ else:

 AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = os.environ.get(
     "AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST",
-    os.environ.get("AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", ""),
+    os.environ.get("AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", "10"),
 )

-
 if AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST == "":
     AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = None
 else:
     try:
         AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = int(AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST)
     except Exception:
-        AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = 5
-
+        AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = 10

 ####################################
 # OFFLINE_MODE
@@ -424,13 +414,12 @@ if OFFLINE_MODE:
 ####################################
 # AUDIT LOGGING
 ####################################
-ENABLE_AUDIT_LOGS = os.getenv("ENABLE_AUDIT_LOGS", "false").lower() == "true"
 # Where to store log file
 AUDIT_LOGS_FILE_PATH = f"{DATA_DIR}/audit.log"
 # Maximum size of a file before rotating into a new log file
 AUDIT_LOG_FILE_ROTATION_SIZE = os.getenv("AUDIT_LOG_FILE_ROTATION_SIZE", "10MB")
 # METADATA | REQUEST | REQUEST_RESPONSE
-AUDIT_LOG_LEVEL = os.getenv("AUDIT_LOG_LEVEL", "REQUEST_RESPONSE").upper()
+AUDIT_LOG_LEVEL = os.getenv("AUDIT_LOG_LEVEL", "NONE").upper()
 try:
     MAX_BODY_LOG_SIZE = int(os.environ.get("MAX_BODY_LOG_SIZE") or 2048)
 except ValueError:
@@ -442,3 +431,26 @@ AUDIT_EXCLUDED_PATHS = os.getenv("AUDIT_EXCLUDED_PATHS", "/chats,/chat,/folders"
 )
 AUDIT_EXCLUDED_PATHS = [path.strip() for path in AUDIT_EXCLUDED_PATHS]
 AUDIT_EXCLUDED_PATHS = [path.lstrip("/") for path in AUDIT_EXCLUDED_PATHS]
+
+####################################
+# OPENTELEMETRY
+####################################
+
+ENABLE_OTEL = os.environ.get("ENABLE_OTEL", "False").lower() == "true"
+OTEL_EXPORTER_OTLP_ENDPOINT = os.environ.get(
+    "OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317"
+)
+OTEL_SERVICE_NAME = os.environ.get("OTEL_SERVICE_NAME", "open-webui")
+OTEL_RESOURCE_ATTRIBUTES = os.environ.get(
+    "OTEL_RESOURCE_ATTRIBUTES", ""
+)  # e.g. key1=val1,key2=val2
+OTEL_TRACES_SAMPLER = os.environ.get(
+    "OTEL_TRACES_SAMPLER", "parentbased_always_on"
+).lower()
+
+####################################
+# TOOLS/FUNCTIONS PIP OPTIONS
+####################################
+
+PIP_OPTIONS = os.getenv("PIP_OPTIONS", "").split()
+PIP_PACKAGE_INDEX_OPTIONS = os.getenv("PIP_PACKAGE_INDEX_OPTIONS", "").split()

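Aside (not from the commit): OTEL_RESOURCE_ATTRIBUTES is documented above as "key1=val1,key2=val2". A small sketch of parsing that format into a dict, for example when building an OpenTelemetry Resource:

def parse_resource_attributes(raw: str) -> dict:
    # "key1=val1,key2=val2" -> {"key1": "val1", "key2": "val2"}
    attrs = {}
    for pair in raw.split(","):
        key, sep, value = pair.partition("=")
        if sep:
            attrs[key.strip()] = value.strip()
    return attrs

print(parse_resource_attributes("key1=val1,key2=val2"))  # {'key1': 'val1', 'key2': 'val2'}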
+ 3 - 0
backend/open_webui/functions.py

@@ -223,6 +223,9 @@ async def generate_function_chat_completion(
     extra_params = {
         "__event_emitter__": __event_emitter__,
         "__event_call__": __event_call__,
+        "__chat_id__": metadata.get("chat_id", None),
+        "__session_id__": metadata.get("session_id", None),
+        "__message_id__": metadata.get("message_id", None),
         "__task__": __task__,
         "__task_body__": __task_body__,
         "__files__": files,

+ 40 - 10
backend/open_webui/main.py

@@ -84,7 +84,7 @@ from open_webui.routers.retrieval import (
     get_rf,
 )

-from open_webui.internal.db import Session
+from open_webui.internal.db import Session, engine

 from open_webui.models.functions import Functions
 from open_webui.models.models import Models
@@ -155,6 +155,7 @@ from open_webui.config import (
     AUDIO_TTS_AZURE_SPEECH_REGION,
     AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT,
     PLAYWRIGHT_WS_URI,
+    PLAYWRIGHT_TIMEOUT,
     FIRECRAWL_API_BASE_URL,
     FIRECRAWL_API_KEY,
     RAG_WEB_LOADER_ENGINE,
@@ -186,6 +187,7 @@ from open_webui.config import (
     CHUNK_SIZE,
     CONTENT_EXTRACTION_ENGINE,
     TIKA_SERVER_URL,
+    DOCLING_SERVER_URL,
     DOCUMENT_INTELLIGENCE_ENDPOINT,
     DOCUMENT_INTELLIGENCE_KEY,
     RAG_TOP_K,
@@ -213,6 +215,7 @@ from open_webui.config import (
     SERPSTACK_API_KEY,
     SERPSTACK_HTTPS,
     TAVILY_API_KEY,
+    TAVILY_EXTRACT_DEPTH,
     BING_SEARCH_V7_ENDPOINT,
     BING_SEARCH_V7_SUBSCRIPTION_KEY,
     BRAVE_SEARCH_API_KEY,
@@ -313,6 +316,7 @@ from open_webui.env import (
     AUDIT_EXCLUDED_PATHS,
     AUDIT_LOG_LEVEL,
     CHANGELOG,
+    REDIS_URL,
     GLOBAL_LOG_LEVEL,
     MAX_BODY_LOG_SIZE,
     SAFE_MODE,
@@ -328,6 +332,7 @@ from open_webui.env import (
     BYPASS_MODEL_ACCESS_CONTROL,
     RESET_CONFIG_ON_START,
     OFFLINE_MODE,
+    ENABLE_OTEL,
 )


@@ -355,7 +360,6 @@ from open_webui.utils.security_headers import SecurityHeadersMiddleware

 from open_webui.tasks import stop_task, list_tasks  # Import from tasks.py

-
 if SAFE_MODE:
     print("SAFE MODE ENABLED")
     Functions.deactivate_all_functions()
@@ -419,11 +423,24 @@ app = FastAPI(

 oauth_manager = OAuthManager(app)

-app.state.config = AppConfig()
+app.state.config = AppConfig(redis_url=REDIS_URL)

 app.state.WEBUI_NAME = WEBUI_NAME
 app.state.LICENSE_METADATA = None

+
+########################################
+#
+# OPENTELEMETRY
+#
+########################################
+
+if ENABLE_OTEL:
+    from open_webui.utils.telemetry.setup import setup as setup_opentelemetry
+
+    setup_opentelemetry(app=app, db_engine=engine)
+
+
 ########################################
 #
 # OLLAMA
@@ -551,6 +568,7 @@ app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = (

 app.state.config.CONTENT_EXTRACTION_ENGINE = CONTENT_EXTRACTION_ENGINE
 app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL
+app.state.config.DOCLING_SERVER_URL = DOCLING_SERVER_URL
 app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT = DOCUMENT_INTELLIGENCE_ENDPOINT
 app.state.config.DOCUMENT_INTELLIGENCE_KEY = DOCUMENT_INTELLIGENCE_KEY

@@ -614,8 +632,10 @@ app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = RAG_WEB_SEARCH_CONCURRENT_
 app.state.config.RAG_WEB_LOADER_ENGINE = RAG_WEB_LOADER_ENGINE
 app.state.config.RAG_WEB_SEARCH_TRUST_ENV = RAG_WEB_SEARCH_TRUST_ENV
 app.state.config.PLAYWRIGHT_WS_URI = PLAYWRIGHT_WS_URI
+app.state.config.PLAYWRIGHT_TIMEOUT = PLAYWRIGHT_TIMEOUT
 app.state.config.FIRECRAWL_API_BASE_URL = FIRECRAWL_API_BASE_URL
 app.state.config.FIRECRAWL_API_KEY = FIRECRAWL_API_KEY
+app.state.config.TAVILY_EXTRACT_DEPTH = TAVILY_EXTRACT_DEPTH

 app.state.EMBEDDING_FUNCTION = None
 app.state.ef = None
@@ -949,14 +969,24 @@ async def get_models(request: Request, user=Depends(get_verified_user)):

         return filtered_models

-    models = await get_all_models(request, user=user)
+    all_models = await get_all_models(request, user=user)
+
+    models = []
+    for model in all_models:
+        # Filter out filter pipelines
+        if "pipeline" in model and model["pipeline"].get("type", None) == "filter":
+            continue
+
+        model_tags = [
+            tag.get("name")
+            for tag in model.get("info", {}).get("meta", {}).get("tags", [])
+        ]
+        tags = [tag.get("name") for tag in model.get("tags", [])]
+
+        tags = list(set(model_tags + tags))
+        model["tags"] = [{"name": tag} for tag in tags]

-    # Filter out filter pipelines
-    models = [
-        model
-        for model in models
-        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
-    ]
+        models.append(model)

     model_order_list = request.app.state.config.MODEL_ORDER_LIST
     if model_order_list:

+ 53 - 1
backend/open_webui/retrieval/loaders/main.py

@@ -105,7 +105,7 @@ class TikaLoader:
 
 
         if r.ok:
             raw_metadata = r.json()
-            text = raw_metadata.get("X-TIKA:content", "<No text content found>")
+            text = raw_metadata.get("X-TIKA:content", "<No text content found>").strip()

             if "Content-Type" in raw_metadata:
                 headers["Content-Type"] = raw_metadata["Content-Type"]
@@ -117,6 +117,52 @@ class TikaLoader:
             raise Exception(f"Error calling Tika: {r.reason}")


+class DoclingLoader:
+    def __init__(self, url, file_path=None, mime_type=None):
+        self.url = url.rstrip("/")
+        self.file_path = file_path
+        self.mime_type = mime_type
+
+    def load(self) -> list[Document]:
+        with open(self.file_path, "rb") as f:
+            files = {
+                "files": (
+                    self.file_path,
+                    f,
+                    self.mime_type or "application/octet-stream",
+                )
+            }
+
+            params = {
+                "image_export_mode": "placeholder",
+                "table_mode": "accurate",
+            }
+
+            endpoint = f"{self.url}/v1alpha/convert/file"
+            r = requests.post(endpoint, files=files, data=params)
+
+        if r.ok:
+            result = r.json()
+            document_data = result.get("document", {})
+            text = document_data.get("md_content", "<No text content found>")
+
+            metadata = {"Content-Type": self.mime_type} if self.mime_type else {}
+
+            log.debug("Docling extracted text: %s", text)
+
+            return [Document(page_content=text, metadata=metadata)]
+        else:
+            error_msg = f"Error calling Docling API: {r.reason}"
+            if r.text:
+                try:
+                    error_data = r.json()
+                    if "detail" in error_data:
+                        error_msg += f" - {error_data['detail']}"
+                except Exception:
+                    error_msg += f" - {r.text}"
+            raise Exception(f"Error calling Docling: {error_msg}")
+
+
 class Loader:
     def __init__(self, engine: str = "", **kwargs):
         self.engine = engine
@@ -149,6 +195,12 @@ class Loader:
                     file_path=file_path,
                     mime_type=file_content_type,
                 )
+        elif self.engine == "docling" and self.kwargs.get("DOCLING_SERVER_URL"):
+            loader = DoclingLoader(
+                url=self.kwargs.get("DOCLING_SERVER_URL"),
+                file_path=file_path,
+                mime_type=file_content_type,
+            )
         elif (
             self.engine == "document_intelligence"
             and self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT") != ""

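Usage sketch (not part of the commit) for the DoclingLoader added above; the server URL matches the DOCLING_SERVER_URL default from config.py, and the file path is a placeholder:

from open_webui.retrieval.loaders.main import DoclingLoader

loader = DoclingLoader(
    url="http://docling:5001",      # DOCLING_SERVER_URL default shown in the diff
    file_path="/tmp/example.pdf",   # placeholder file
    mime_type="application/pdf",
)
docs = loader.load()                # one Document carrying the converted "md_content" text
print(docs[0].page_content[:200])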
+ 93 - 0
backend/open_webui/retrieval/loaders/tavily.py

@@ -0,0 +1,93 @@
+import requests
+import logging
+from typing import Iterator, List, Literal, Union
+
+from langchain_core.document_loaders import BaseLoader
+from langchain_core.documents import Document
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
+
+class TavilyLoader(BaseLoader):
+    """Extract web page content from URLs using Tavily Extract API.
+
+    This is a LangChain document loader that uses Tavily's Extract API to
+    retrieve content from web pages and return it as Document objects.
+
+    Args:
+        urls: URL or list of URLs to extract content from.
+        api_key: The Tavily API key.
+        extract_depth: Depth of extraction, either "basic" or "advanced".
+        continue_on_failure: Whether to continue if extraction of a URL fails.
+    """
+
+    def __init__(
+        self,
+        urls: Union[str, List[str]],
+        api_key: str,
+        extract_depth: Literal["basic", "advanced"] = "basic",
+        continue_on_failure: bool = True,
+    ) -> None:
+        """Initialize Tavily Extract client.
+
+        Args:
+            urls: URL or list of URLs to extract content from.
+            api_key: The Tavily API key.
+            extract_depth: Depth of extraction, either "basic" or "advanced".
+                advanced extraction retrieves more data, including tables and
+                embedded content, with higher success but may increase latency.
+                basic costs 1 credit per 5 successful URL extractions,
+                advanced costs 2 credits per 5 successful URL extractions.
+            continue_on_failure: Whether to continue if extraction of a URL fails.
+        """
+        if not urls:
+            raise ValueError("At least one URL must be provided.")
+
+        self.api_key = api_key
+        self.urls = urls if isinstance(urls, list) else [urls]
+        self.extract_depth = extract_depth
+        self.continue_on_failure = continue_on_failure
+        self.api_url = "https://api.tavily.com/extract"
+
+    def lazy_load(self) -> Iterator[Document]:
+        """Extract and yield documents from the URLs using Tavily Extract API."""
+        batch_size = 20
+        for i in range(0, len(self.urls), batch_size):
+            batch_urls = self.urls[i : i + batch_size]
+            try:
+                headers = {
+                    "Content-Type": "application/json",
+                    "Authorization": f"Bearer {self.api_key}",
+                }
+                # Use string for single URL, array for multiple URLs
+                urls_param = batch_urls[0] if len(batch_urls) == 1 else batch_urls
+                payload = {"urls": urls_param, "extract_depth": self.extract_depth}
+                # Make the API call
+                response = requests.post(self.api_url, headers=headers, json=payload)
+                response.raise_for_status()
+                response_data = response.json()
+                # Process successful results
+                for result in response_data.get("results", []):
+                    url = result.get("url", "")
+                    content = result.get("raw_content", "")
+                    if not content:
+                        log.warning(f"No content extracted from {url}")
+                        continue
+                    # Add URLs as metadata
+                    metadata = {"source": url}
+                    yield Document(
+                        page_content=content,
+                        metadata=metadata,
+                    )
+                for failed in response_data.get("failed_results", []):
+                    url = failed.get("url", "")
+                    error = failed.get("error", "Unknown error")
+                    log.error(f"Failed to extract content from {url}: {error}")
+            except Exception as e:
+                if self.continue_on_failure:
+                    log.error(f"Error extracting content from batch {batch_urls}: {e}")
+                else:
+                    raise e

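Usage sketch (not part of the commit) for the TavilyLoader above; the API key is a placeholder. lazy_load() batches up to 20 URLs per Extract API call and yields one Document per successfully extracted page:

from open_webui.retrieval.loaders.tavily import TavilyLoader

loader = TavilyLoader(
    urls=["https://example.com/a", "https://example.com/b"],
    api_key="tvly-...",            # placeholder Tavily API key
    extract_depth="basic",         # "advanced" returns more (tables, embedded content) at higher cost
)
for doc in loader.lazy_load():
    print(doc.metadata["source"], len(doc.page_content))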
+ 21 - 12
backend/open_webui/retrieval/utils.py

@@ -189,8 +189,7 @@ def merge_and_sort_query_results(
     query_results: list[dict], k: int, reverse: bool = False
 ) -> dict:
     # Initialize lists to store combined data
-    combined = []
-    seen_hashes = set()  # To store unique document hashes
+    combined = dict()  # To store documents with unique document hashes

     for data in query_results:
         distances = data["distances"][0]
@@ -203,10 +202,19 @@ def merge_and_sort_query_results(
                     document.encode()
                 ).hexdigest()  # Compute a hash for uniqueness

-                if doc_hash not in seen_hashes:
-                    seen_hashes.add(doc_hash)
-                    combined.append((distance, document, metadata))
+                if doc_hash not in combined.keys():
+                    combined[doc_hash] = (distance, document, metadata)
+                    continue  # if doc is new, no further comparison is needed

+                # if doc is already in, but new distance is better, update
+                if not reverse and distance < combined[doc_hash][0]:
+                    # Chroma uses unconventional cosine similarity, so we don't need to reverse the results
+                    # https://docs.trychroma.com/docs/collections/configure#configuring-chroma-collections
+                    combined[doc_hash] = (distance, document, metadata)
+                if reverse and distance > combined[doc_hash][0]:
+                    combined[doc_hash] = (distance, document, metadata)
+
+    combined = list(combined.values())
     # Sort the list based on distances
     combined.sort(key=lambda x: x[0], reverse=reverse)

@@ -215,6 +223,12 @@ def merge_and_sort_query_results(
         zip(*combined[:k]) if combined else ([], [], [])
     )

+    # if chromaDB, the distance is 0 (best) to 2 (worse)
+    # re-order to -1 (worst) to 1 (best) for relevance score
+    if not reverse:
+        sorted_distances = tuple(-dist for dist in sorted_distances)
+        sorted_distances = tuple(dist + 1 for dist in sorted_distances)
+
     # Create and return the output dictionary
     return {
         "distances": [list(sorted_distances)],
@@ -306,13 +320,8 @@ def query_collection_with_hybrid_search(
         raise Exception(
             "Hybrid search failed for all collections. Using Non hybrid search as fallback."
         )
-
-    if VECTOR_DB == "chroma":
-        # Chroma uses unconventional cosine similarity, so we don't need to reverse the results
-        # https://docs.trychroma.com/docs/collections/configure#configuring-chroma-collections
-        return merge_and_sort_query_results(results, k=k_reranker, reverse=False)
-    else:
-        return merge_and_sort_query_results(results, k=k_reranker, reverse=True)
+
+    return merge_and_sort_query_results(results, k=k, reverse=True)


 def get_embedding_function(

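Worked example (not part of the commit) of the rescaling added in merge_and_sort_query_results above: Chroma cosine distances run from 0 (best) to 2 (worst), and negating then adding 1 maps them onto a relevance-style score from 1 (best) down to -1 (worst):

chroma_distances = (0.0, 0.4, 1.0, 2.0)
relevance_scores = tuple(-d + 1 for d in chroma_distances)
print(relevance_scores)  # (1.0, 0.6, 0.0, -1.0)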
+ 13 - 6
backend/open_webui/retrieval/vector/dbs/chroma.py

@@ -166,12 +166,19 @@ class ChromaClient:
         filter: Optional[dict] = None,
     ):
         # Delete the items from the collection based on the ids.
-        collection = self.client.get_collection(name=collection_name)
-        if collection:
-            if ids:
-                collection.delete(ids=ids)
-            elif filter:
-                collection.delete(where=filter)
+        try:
+            collection = self.client.get_collection(name=collection_name)
+            if collection:
+                if ids:
+                    collection.delete(ids=ids)
+                elif filter:
+                    collection.delete(where=filter)
+        except Exception as e:
+            # If collection doesn't exist, that's fine - nothing to delete
+            log.debug(
+                f"Attempted to delete from non-existent collection {collection_name}. Ignoring."
+            )
+            pass
 
 
     def reset(self):
         # Resets the database. This will delete all collections and item entries.

+ 110 - 65
backend/open_webui/retrieval/vector/dbs/opensearch.py

@@ -1,4 +1,5 @@
 from opensearchpy import OpenSearch
 from opensearchpy import OpenSearch
+from opensearchpy.helpers import bulk
 from typing import Optional
 from typing import Optional
 
 
 from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult
 from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult
@@ -21,7 +22,13 @@ class OpenSearchClient:
             http_auth=(OPENSEARCH_USERNAME, OPENSEARCH_PASSWORD),
             http_auth=(OPENSEARCH_USERNAME, OPENSEARCH_PASSWORD),
         )
         )
 
 
+    def _get_index_name(self, collection_name: str) -> str:
+        return f"{self.index_prefix}_{collection_name}"
+
     def _result_to_get_result(self, result) -> GetResult:
     def _result_to_get_result(self, result) -> GetResult:
+        if not result["hits"]["hits"]:
+            return None
+
         ids = []
         ids = []
         documents = []
         documents = []
         metadatas = []
         metadatas = []
@@ -31,9 +38,12 @@ class OpenSearchClient:
             documents.append(hit["_source"].get("text"))
             documents.append(hit["_source"].get("text"))
             metadatas.append(hit["_source"].get("metadata"))
             metadatas.append(hit["_source"].get("metadata"))
 
 
-        return GetResult(ids=ids, documents=documents, metadatas=metadatas)
+        return GetResult(ids=[ids], documents=[documents], metadatas=[metadatas])
 
 
     def _result_to_search_result(self, result) -> SearchResult:
     def _result_to_search_result(self, result) -> SearchResult:
+        if not result["hits"]["hits"]:
+            return None
+
         ids = []
         ids = []
         distances = []
         distances = []
         documents = []
         documents = []
@@ -46,34 +56,40 @@ class OpenSearchClient:
             metadatas.append(hit["_source"].get("metadata"))
             metadatas.append(hit["_source"].get("metadata"))
 
 
         return SearchResult(
         return SearchResult(
-            ids=ids, distances=distances, documents=documents, metadatas=metadatas
+            ids=[ids],
+            distances=[distances],
+            documents=[documents],
+            metadatas=[metadatas],
         )
         )
 
 
     def _create_index(self, collection_name: str, dimension: int):
     def _create_index(self, collection_name: str, dimension: int):
         body = {
         body = {
+            "settings": {"index": {"knn": True}},
             "mappings": {
             "mappings": {
                 "properties": {
                 "properties": {
                     "id": {"type": "keyword"},
                     "id": {"type": "keyword"},
                     "vector": {
                     "vector": {
-                        "type": "dense_vector",
-                        "dims": dimension,  # Adjust based on your vector dimensions
-                        "index": true,
+                        "type": "knn_vector",
+                        "dimension": dimension,  # Adjust based on your vector dimensions
+                        "index": True,
                         "similarity": "faiss",
                         "similarity": "faiss",
                         "method": {
                         "method": {
                             "name": "hnsw",
                             "name": "hnsw",
-                            "space_type": "ip",  # Use inner product to approximate cosine similarity
+                            "space_type": "innerproduct",  # Use inner product to approximate cosine similarity
                             "engine": "faiss",
                             "engine": "faiss",
-                            "ef_construction": 128,
-                            "m": 16,
+                            "parameters": {
+                                "ef_construction": 128,
+                                "m": 16,
+                            },
                         },
                         },
                     },
                     },
                     "text": {"type": "text"},
                     "text": {"type": "text"},
                     "metadata": {"type": "object"},
                     "metadata": {"type": "object"},
                 }
                 }
-            }
+            },
         }
         }
         self.client.indices.create(
         self.client.indices.create(
-            index=f"{self.index_prefix}_{collection_name}", body=body
+            index=self._get_index_name(collection_name), body=body
         )
         )
 
 
     def _create_batches(self, items: list[VectorItem], batch_size=100):
     def _create_batches(self, items: list[VectorItem], batch_size=100):
@@ -83,39 +99,45 @@ class OpenSearchClient:
     def has_collection(self, collection_name: str) -> bool:
     def has_collection(self, collection_name: str) -> bool:
         # has_collection here means has index.
         # has_collection here means has index.
         # We are simply adapting to the norms of the other DBs.
         # We are simply adapting to the norms of the other DBs.
-        return self.client.indices.exists(
-            index=f"{self.index_prefix}_{collection_name}"
-        )
+        return self.client.indices.exists(index=self._get_index_name(collection_name))
 
 
-    def delete_colleciton(self, collection_name: str):
+    def delete_collection(self, collection_name: str):
         # delete_collection here means delete index.
         # delete_collection here means delete index.
         # We are simply adapting to the norms of the other DBs.
         # We are simply adapting to the norms of the other DBs.
-        self.client.indices.delete(index=f"{self.index_prefix}_{collection_name}")
+        self.client.indices.delete(index=self._get_index_name(collection_name))
 
 
     def search(
     def search(
-        self, collection_name: str, vectors: list[list[float]], limit: int
+        self, collection_name: str, vectors: list[list[float | int]], limit: int
     ) -> Optional[SearchResult]:
     ) -> Optional[SearchResult]:
-        query = {
-            "size": limit,
-            "_source": ["text", "metadata"],
-            "query": {
-                "script_score": {
-                    "query": {"match_all": {}},
-                    "script": {
-                        "source": "cosineSimilarity(params.vector, 'vector') + 1.0",
-                        "params": {
-                            "vector": vectors[0]
-                        },  # Assuming single query vector
-                    },
-                }
-            },
-        }
+        try:
+            if not self.has_collection(collection_name):
+                return None
+
+            query = {
+                "size": limit,
+                "_source": ["text", "metadata"],
+                "query": {
+                    "script_score": {
+                        "query": {"match_all": {}},
+                        "script": {
+                            "source": "cosineSimilarity(params.query_value, doc[params.field]) + 1.0",
+                            "params": {
+                                "field": "vector",
+                                "query_value": vectors[0],
+                            },  # Assuming single query vector
+                        },
+                    }
+                },
+            }
 
 
-        result = self.client.search(
-            index=f"{self.index_prefix}_{collection_name}", body=query
-        )
+            result = self.client.search(
+                index=self._get_index_name(collection_name), body=query
+            )
+
+            return self._result_to_search_result(result)
 
 
-        return self._result_to_search_result(result)
+        except Exception as e:
+            return None
 
 
     def query(
     def query(
         self, collection_name: str, filter: dict, limit: Optional[int] = None
         self, collection_name: str, filter: dict, limit: Optional[int] = None
@@ -129,13 +151,15 @@ class OpenSearchClient:
         }
         }
 
 
         for field, value in filter.items():
         for field, value in filter.items():
-            query_body["query"]["bool"]["filter"].append({"term": {field: value}})
+            query_body["query"]["bool"]["filter"].append(
+                {"match": {"metadata." + str(field): value}}
+            )
 
 
         size = limit if limit else 10
         size = limit if limit else 10
 
 
         try:
         try:
             result = self.client.search(
             result = self.client.search(
-                index=f"{self.index_prefix}_{collection_name}",
+                index=self._get_index_name(collection_name),
                 body=query_body,
                 body=query_body,
                 size=size,
                 size=size,
             )
             )
@@ -146,14 +170,14 @@ class OpenSearchClient:
             return None
             return None
 
 
     def _create_index_if_not_exists(self, collection_name: str, dimension: int):
     def _create_index_if_not_exists(self, collection_name: str, dimension: int):
-        if not self.has_index(collection_name):
+        if not self.has_collection(collection_name):
             self._create_index(collection_name, dimension)
             self._create_index(collection_name, dimension)
 
 
     def get(self, collection_name: str) -> Optional[GetResult]:
     def get(self, collection_name: str) -> Optional[GetResult]:
         query = {"query": {"match_all": {}}, "_source": ["text", "metadata"]}
         query = {"query": {"match_all": {}}, "_source": ["text", "metadata"]}
 
 
         result = self.client.search(
         result = self.client.search(
-            index=f"{self.index_prefix}_{collection_name}", body=query
+            index=self._get_index_name(collection_name), body=query
         )
         )
         return self._result_to_get_result(result)
         return self._result_to_get_result(result)
 
 
@@ -165,18 +189,18 @@ class OpenSearchClient:
         for batch in self._create_batches(items):
         for batch in self._create_batches(items):
             actions = [
             actions = [
                 {
                 {
-                    "index": {
-                        "_id": item["id"],
-                        "_source": {
-                            "vector": item["vector"],
-                            "text": item["text"],
-                            "metadata": item["metadata"],
-                        },
-                    }
+                    "_op_type": "index",
+                    "_index": self._get_index_name(collection_name),
+                    "_id": item["id"],
+                    "_source": {
+                        "vector": item["vector"],
+                        "text": item["text"],
+                        "metadata": item["metadata"],
+                    },
                 }
                 }
                 for item in batch
                 for item in batch
             ]
             ]
-            self.client.bulk(actions)
+            bulk(self.client, actions)
 
 
     def upsert(self, collection_name: str, items: list[VectorItem]):
     def upsert(self, collection_name: str, items: list[VectorItem]):
         self._create_index_if_not_exists(
         self._create_index_if_not_exists(
@@ -186,26 +210,47 @@ class OpenSearchClient:
         for batch in self._create_batches(items):
         for batch in self._create_batches(items):
             actions = [
             actions = [
                 {
                 {
-                    "index": {
-                        "_id": item["id"],
-                        "_index": f"{self.index_prefix}_{collection_name}",
-                        "_source": {
-                            "vector": item["vector"],
-                            "text": item["text"],
-                            "metadata": item["metadata"],
-                        },
-                    }
+                    "_op_type": "update",
+                    "_index": self._get_index_name(collection_name),
+                    "_id": item["id"],
+                    "doc": {
+                        "vector": item["vector"],
+                        "text": item["text"],
+                        "metadata": item["metadata"],
+                    },
+                    "doc_as_upsert": True,
                 }
                 for item in batch
             ]
-            self.client.bulk(actions)
-
-    def delete(self, collection_name: str, ids: list[str]):
-        actions = [
-            {"delete": {"_index": f"{self.index_prefix}_{collection_name}", "_id": id}}
-            for id in ids
-        ]
-        self.client.bulk(body=actions)
+            bulk(self.client, actions)
+
+    def delete(
+        self,
+        collection_name: str,
+        ids: Optional[list[str]] = None,
+        filter: Optional[dict] = None,
+    ):
+        if ids:
+            actions = [
+                {
+                    "_op_type": "delete",
+                    "_index": self._get_index_name(collection_name),
+                    "_id": id,
+                }
+                for id in ids
+            ]
+            bulk(self.client, actions)
+        elif filter:
+            query_body = {
+                "query": {"bool": {"filter": []}},
+            }
+            for field, value in filter.items():
+                query_body["query"]["bool"]["filter"].append(
+                    {"match": {"metadata." + str(field): value}}
+                )
+            self.client.delete_by_query(
+                index=self._get_index_name(collection_name), body=query_body
+            )

     def reset(self):
         indices = self.client.indices.get(index=f"{self.index_prefix}_*")
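
A minimal usage sketch of the reworked delete() above: it now takes either explicit ids (sent as bulk delete actions) or a metadata filter (handled via delete_by_query). The shared client object and its construction are assumptions, not shown in this diff.

    # Illustrative only: both call forms supported by the reworked delete().
    # VECTOR_DB_CLIENT is the shared vector-DB client used by the routers below;
    # its import path is not part of this diff.
    VECTOR_DB_CLIENT.delete(collection_name="file-abc123", ids=["chunk-1", "chunk-2"])
    VECTOR_DB_CLIENT.delete(collection_name="knowledge-xyz", filter={"file_id": "abc123"})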

+ 172 - 76
backend/open_webui/retrieval/web/utils.py

@@ -24,13 +24,17 @@ from langchain_community.document_loaders import PlaywrightURLLoader, WebBaseLoa
 from langchain_community.document_loaders.firecrawl import FireCrawlLoader
 from langchain_community.document_loaders.base import BaseLoader
 from langchain_core.documents import Document
+from open_webui.retrieval.loaders.tavily import TavilyLoader
 from open_webui.constants import ERROR_MESSAGES
 from open_webui.config import (
     ENABLE_RAG_LOCAL_WEB_FETCH,
     PLAYWRIGHT_WS_URI,
+    PLAYWRIGHT_TIMEOUT,
     RAG_WEB_LOADER_ENGINE,
     FIRECRAWL_API_BASE_URL,
     FIRECRAWL_API_KEY,
+    TAVILY_API_KEY,
+    TAVILY_EXTRACT_DEPTH,
 )
 from open_webui.env import SRC_LOG_LEVELS

@@ -113,7 +117,47 @@ def verify_ssl_cert(url: str) -> bool:
         return False


-class SafeFireCrawlLoader(BaseLoader):
+class RateLimitMixin:
+    async def _wait_for_rate_limit(self):
+        """Wait to respect the rate limit if specified."""
+        if self.requests_per_second and self.last_request_time:
+            min_interval = timedelta(seconds=1.0 / self.requests_per_second)
+            time_since_last = datetime.now() - self.last_request_time
+            if time_since_last < min_interval:
+                await asyncio.sleep((min_interval - time_since_last).total_seconds())
+        self.last_request_time = datetime.now()
+
+    def _sync_wait_for_rate_limit(self):
+        """Synchronous version of rate limit wait."""
+        if self.requests_per_second and self.last_request_time:
+            min_interval = timedelta(seconds=1.0 / self.requests_per_second)
+            time_since_last = datetime.now() - self.last_request_time
+            if time_since_last < min_interval:
+                time.sleep((min_interval - time_since_last).total_seconds())
+        self.last_request_time = datetime.now()
+
+
+class URLProcessingMixin:
+    def _verify_ssl_cert(self, url: str) -> bool:
+        """Verify SSL certificate for a URL."""
+        return verify_ssl_cert(url)
+
+    async def _safe_process_url(self, url: str) -> bool:
+        """Perform safety checks before processing a URL."""
+        if self.verify_ssl and not self._verify_ssl_cert(url):
+            raise ValueError(f"SSL certificate verification failed for {url}")
+        await self._wait_for_rate_limit()
+        return True
+
+    def _safe_process_url_sync(self, url: str) -> bool:
+        """Synchronous version of safety checks."""
+        if self.verify_ssl and not self._verify_ssl_cert(url):
+            raise ValueError(f"SSL certificate verification failed for {url}")
+        self._sync_wait_for_rate_limit()
+        return True
+
+
+class SafeFireCrawlLoader(BaseLoader, RateLimitMixin, URLProcessingMixin):
     def __init__(
         self,
         web_paths,
@@ -184,7 +228,7 @@ class SafeFireCrawlLoader(BaseLoader):
                 yield from loader.lazy_load()
             except Exception as e:
                 if self.continue_on_failure:
-                    log.exception(e, "Error loading %s", url)
+                    log.exception(f"Error loading {url}: {e}")
                     continue
                 raise e

@@ -204,47 +248,124 @@ class SafeFireCrawlLoader(BaseLoader):
                     yield document
             except Exception as e:
                 if self.continue_on_failure:
-                    log.exception(e, "Error loading %s", url)
+                    log.exception(f"Error loading {url}: {e}")
                     continue
                 raise e

-    def _verify_ssl_cert(self, url: str) -> bool:
-        return verify_ssl_cert(url)
 
 
-    async def _wait_for_rate_limit(self):
-        """Wait to respect the rate limit if specified."""
-        if self.requests_per_second and self.last_request_time:
-            min_interval = timedelta(seconds=1.0 / self.requests_per_second)
-            time_since_last = datetime.now() - self.last_request_time
-            if time_since_last < min_interval:
-                await asyncio.sleep((min_interval - time_since_last).total_seconds())
-        self.last_request_time = datetime.now()
+class SafeTavilyLoader(BaseLoader, RateLimitMixin, URLProcessingMixin):
+    def __init__(
+        self,
+        web_paths: Union[str, List[str]],
+        api_key: str,
+        extract_depth: Literal["basic", "advanced"] = "basic",
+        continue_on_failure: bool = True,
+        requests_per_second: Optional[float] = None,
+        verify_ssl: bool = True,
+        trust_env: bool = False,
+        proxy: Optional[Dict[str, str]] = None,
+    ):
+        """Initialize SafeTavilyLoader with rate limiting and SSL verification support.

-    def _sync_wait_for_rate_limit(self):
-        """Synchronous version of rate limit wait."""
-        if self.requests_per_second and self.last_request_time:
-            min_interval = timedelta(seconds=1.0 / self.requests_per_second)
-            time_since_last = datetime.now() - self.last_request_time
-            if time_since_last < min_interval:
-                time.sleep((min_interval - time_since_last).total_seconds())
-        self.last_request_time = datetime.now()
+        Args:
+            web_paths: List of URLs/paths to process.
+            api_key: The Tavily API key.
+            extract_depth: Depth of extraction ("basic" or "advanced").
+            continue_on_failure: Whether to continue if extraction of a URL fails.
+            requests_per_second: Number of requests per second to limit to.
+            verify_ssl: If True, verify SSL certificates.
+            trust_env: If True, use proxy settings from environment variables.
+            proxy: Optional proxy configuration.
+        """
+        # Initialize proxy configuration if using environment variables
+        proxy_server = proxy.get("server") if proxy else None
+        if trust_env and not proxy_server:
+            env_proxies = urllib.request.getproxies()
+            env_proxy_server = env_proxies.get("https") or env_proxies.get("http")
+            if env_proxy_server:
+                if proxy:
+                    proxy["server"] = env_proxy_server
+                else:
+                    proxy = {"server": env_proxy_server}

-    async def _safe_process_url(self, url: str) -> bool:
-        """Perform safety checks before processing a URL."""
-        if self.verify_ssl and not self._verify_ssl_cert(url):
-            raise ValueError(f"SSL certificate verification failed for {url}")
-        await self._wait_for_rate_limit()
-        return True
+        # Store parameters for creating TavilyLoader instances
+        self.web_paths = web_paths if isinstance(web_paths, list) else [web_paths]
+        self.api_key = api_key
+        self.extract_depth = extract_depth
+        self.continue_on_failure = continue_on_failure
+        self.verify_ssl = verify_ssl
+        self.trust_env = trust_env
+        self.proxy = proxy

-    def _safe_process_url_sync(self, url: str) -> bool:
-        """Synchronous version of safety checks."""
-        if self.verify_ssl and not self._verify_ssl_cert(url):
-            raise ValueError(f"SSL certificate verification failed for {url}")
-        self._sync_wait_for_rate_limit()
-        return True
+        # Add rate limiting
+        self.requests_per_second = requests_per_second
+        self.last_request_time = None

+    def lazy_load(self) -> Iterator[Document]:
+        """Load documents with rate limiting support, delegating to TavilyLoader."""
+        valid_urls = []
+        for url in self.web_paths:
+            try:
+                self._safe_process_url_sync(url)
+                valid_urls.append(url)
+            except Exception as e:
+                log.warning(f"SSL verification failed for {url}: {str(e)}")
+                if not self.continue_on_failure:
+                    raise e
+        if not valid_urls:
+            if self.continue_on_failure:
+                log.warning("No valid URLs to process after SSL verification")
+                return
+            raise ValueError("No valid URLs to process after SSL verification")
+        try:
+            loader = TavilyLoader(
+                urls=valid_urls,
+                api_key=self.api_key,
+                extract_depth=self.extract_depth,
+                continue_on_failure=self.continue_on_failure,
+            )
+            yield from loader.lazy_load()
+        except Exception as e:
+            if self.continue_on_failure:
+                log.exception(f"Error extracting content from URLs: {e}")
+            else:
+                raise e

-class SafePlaywrightURLLoader(PlaywrightURLLoader):
+    async def alazy_load(self) -> AsyncIterator[Document]:
+        """Async version with rate limiting and SSL verification."""
+        valid_urls = []
+        for url in self.web_paths:
+            try:
+                await self._safe_process_url(url)
+                valid_urls.append(url)
+            except Exception as e:
+                log.warning(f"SSL verification failed for {url}: {str(e)}")
+                if not self.continue_on_failure:
+                    raise e
+
+        if not valid_urls:
+            if self.continue_on_failure:
+                log.warning("No valid URLs to process after SSL verification")
+                return
+            raise ValueError("No valid URLs to process after SSL verification")
+
+        try:
+            loader = TavilyLoader(
+                urls=valid_urls,
+                api_key=self.api_key,
+                extract_depth=self.extract_depth,
+                continue_on_failure=self.continue_on_failure,
+            )
+            async for document in loader.alazy_load():
+                yield document
+        except Exception as e:
+            if self.continue_on_failure:
+                log.exception(f"Error loading URLs: {e}")
+            else:
+                raise e
+
+
+class SafePlaywrightURLLoader(PlaywrightURLLoader, RateLimitMixin, URLProcessingMixin):
     """Load HTML pages safely with Playwright, supporting SSL verification, rate limiting, and remote browser connection.

     Attributes:
@@ -256,6 +377,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
         headless (bool): If True, the browser will run in headless mode.
         proxy (dict): Proxy override settings for the Playwright session.
         playwright_ws_url (Optional[str]): WebSocket endpoint URI for remote browser connection.
+        playwright_timeout (Optional[int]): Maximum operation time in milliseconds.
     """

     def __init__(
@@ -269,6 +391,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
         remove_selectors: Optional[List[str]] = None,
         proxy: Optional[Dict[str, str]] = None,
         playwright_ws_url: Optional[str] = None,
+        playwright_timeout: Optional[int] = 10000,
     ):
         """Initialize with additional safety parameters and remote browser support."""

@@ -295,6 +418,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
         self.last_request_time = None
         self.playwright_ws_url = playwright_ws_url
         self.trust_env = trust_env
+        self.playwright_timeout = playwright_timeout

     def lazy_load(self) -> Iterator[Document]:
         """Safely load URLs synchronously with support for remote browser."""
@@ -311,7 +435,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
                 try:
                     self._safe_process_url_sync(url)
                     page = browser.new_page()
-                    response = page.goto(url)
+                    response = page.goto(url, timeout=self.playwright_timeout)
                     if response is None:
                         raise ValueError(f"page.goto() returned None for url {url}")

@@ -320,7 +444,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
                     yield Document(page_content=text, metadata=metadata)
                 except Exception as e:
                     if self.continue_on_failure:
-                        log.exception(e, "Error loading %s", url)
+                        log.exception(f"Error loading {url}: {e}")
                         continue
                     raise e
             browser.close()
@@ -342,7 +466,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
                 try:
                     await self._safe_process_url(url)
                     page = await browser.new_page()
-                    response = await page.goto(url)
+                    response = await page.goto(url, timeout=self.playwright_timeout)
                     if response is None:
                         raise ValueError(f"page.goto() returned None for url {url}")

@@ -351,46 +475,11 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader):
                     yield Document(page_content=text, metadata=metadata)
                 except Exception as e:
                     if self.continue_on_failure:
-                        log.exception(e, "Error loading %s", url)
+                        log.exception(f"Error loading {url}: {e}")
                         continue
                     raise e
             await browser.close()

-    def _verify_ssl_cert(self, url: str) -> bool:
-        return verify_ssl_cert(url)
-
-    async def _wait_for_rate_limit(self):
-        """Wait to respect the rate limit if specified."""
-        if self.requests_per_second and self.last_request_time:
-            min_interval = timedelta(seconds=1.0 / self.requests_per_second)
-            time_since_last = datetime.now() - self.last_request_time
-            if time_since_last < min_interval:
-                await asyncio.sleep((min_interval - time_since_last).total_seconds())
-        self.last_request_time = datetime.now()
-
-    def _sync_wait_for_rate_limit(self):
-        """Synchronous version of rate limit wait."""
-        if self.requests_per_second and self.last_request_time:
-            min_interval = timedelta(seconds=1.0 / self.requests_per_second)
-            time_since_last = datetime.now() - self.last_request_time
-            if time_since_last < min_interval:
-                time.sleep((min_interval - time_since_last).total_seconds())
-        self.last_request_time = datetime.now()
-
-    async def _safe_process_url(self, url: str) -> bool:
-        """Perform safety checks before processing a URL."""
-        if self.verify_ssl and not self._verify_ssl_cert(url):
-            raise ValueError(f"SSL certificate verification failed for {url}")
-        await self._wait_for_rate_limit()
-        return True
-
-    def _safe_process_url_sync(self, url: str) -> bool:
-        """Synchronous version of safety checks."""
-        if self.verify_ssl and not self._verify_ssl_cert(url):
-            raise ValueError(f"SSL certificate verification failed for {url}")
-        self._sync_wait_for_rate_limit()
-        return True
-

 class SafeWebBaseLoader(WebBaseLoader):
     """WebBaseLoader with enhanced error handling for URLs."""
@@ -472,7 +561,7 @@ class SafeWebBaseLoader(WebBaseLoader):
                 yield Document(page_content=text, metadata=metadata)
             except Exception as e:
                 # Log the error and continue with the next URL
-                log.exception(e, "Error loading %s", path)
+                log.exception(f"Error loading {path}: {e}")

     async def alazy_load(self) -> AsyncIterator[Document]:
         """Async lazy load text from the url(s) in web_path."""
@@ -499,6 +588,7 @@ RAG_WEB_LOADER_ENGINES = defaultdict(lambda: SafeWebBaseLoader)
 RAG_WEB_LOADER_ENGINES["playwright"] = SafePlaywrightURLLoader
 RAG_WEB_LOADER_ENGINES["safe_web"] = SafeWebBaseLoader
 RAG_WEB_LOADER_ENGINES["firecrawl"] = SafeFireCrawlLoader
+RAG_WEB_LOADER_ENGINES["tavily"] = SafeTavilyLoader


 def get_web_loader(
@@ -518,13 +608,19 @@ def get_web_loader(
         "trust_env": trust_env,
     }

-    if PLAYWRIGHT_WS_URI.value:
-        web_loader_args["playwright_ws_url"] = PLAYWRIGHT_WS_URI.value
+    if RAG_WEB_LOADER_ENGINE.value == "playwright":
+        web_loader_args["playwright_timeout"] = PLAYWRIGHT_TIMEOUT.value * 1000
+        if PLAYWRIGHT_WS_URI.value:
+            web_loader_args["playwright_ws_url"] = PLAYWRIGHT_WS_URI.value

     if RAG_WEB_LOADER_ENGINE.value == "firecrawl":
         web_loader_args["api_key"] = FIRECRAWL_API_KEY.value
         web_loader_args["api_url"] = FIRECRAWL_API_BASE_URL.value

+    if RAG_WEB_LOADER_ENGINE.value == "tavily":
+        web_loader_args["api_key"] = TAVILY_API_KEY.value
+        web_loader_args["extract_depth"] = TAVILY_EXTRACT_DEPTH.value
+
     # Create the appropriate WebLoader based on the configuration
     WebLoaderClass = RAG_WEB_LOADER_ENGINES[RAG_WEB_LOADER_ENGINE.value]
     web_loader = WebLoaderClass(**web_loader_args)
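
A short, illustrative sketch of the new Tavily path, based only on the SafeTavilyLoader signature added above; the URL and API key are placeholders.

    # Runs against the class defined in this module; values are placeholders.
    loader = SafeTavilyLoader(
        web_paths=["https://example.com/article"],
        api_key="tvly-placeholder",
        extract_depth="basic",      # or "advanced"
        requests_per_second=2,      # optional client-side rate limit
    )
    for doc in loader.lazy_load():
        print(doc.metadata, doc.page_content[:80])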

+ 3 - 1
backend/open_webui/routers/audio.py

@@ -625,7 +625,9 @@ def transcription(
 ):
     log.info(f"file.content_type: {file.content_type}")

-    if file.content_type not in ["audio/mpeg", "audio/wav", "audio/ogg", "audio/x-m4a"]:
+    supported_filetypes = ("audio/mpeg", "audio/wav", "audio/ogg", "audio/x-m4a")
+
+    if not file.content_type.startswith(supported_filetypes):
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED,
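
A quick illustration of why the check was loosened: str.startswith() accepts a tuple of prefixes, so content types that carry codec parameters now pass as well.

    supported_filetypes = ("audio/mpeg", "audio/wav", "audio/ogg", "audio/x-m4a")
    print("audio/ogg;codecs=opus".startswith(supported_filetypes))  # True
    print("video/mp4".startswith(supported_filetypes))              # False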

+ 1 - 1
backend/open_webui/routers/auths.py

@@ -210,7 +210,7 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm):
             LDAP_APP_DN,
             LDAP_APP_PASSWORD,
             auto_bind="NONE",
-            authentication="SIMPLE",
+            authentication="SIMPLE" if LDAP_APP_DN else "ANONYMOUS",
         )
         if not connection_app.bind():
             raise HTTPException(400, detail="Application account bind failed")

+ 103 - 0
backend/open_webui/routers/chats.py

@@ -2,6 +2,8 @@ import json
 import logging
 from typing import Optional

+
+from open_webui.socket.main import get_event_emitter
 from open_webui.models.chats import (
     ChatForm,
     ChatImportForm,
@@ -372,6 +374,107 @@ async def update_chat_by_id(
         )


+############################
+# UpdateChatMessageById
+############################
+class MessageForm(BaseModel):
+    content: str
+
+
+@router.post("/{id}/messages/{message_id}", response_model=Optional[ChatResponse])
+async def update_chat_message_by_id(
+    id: str, message_id: str, form_data: MessageForm, user=Depends(get_verified_user)
+):
+    chat = Chats.get_chat_by_id(id)
+
+    if not chat:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
+        )
+
+    if chat.user_id != user.id and user.role != "admin":
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
+        )
+
+    chat = Chats.upsert_message_to_chat_by_id_and_message_id(
+        id,
+        message_id,
+        {
+            "content": form_data.content,
+        },
+    )
+
+    event_emitter = get_event_emitter(
+        {
+            "user_id": user.id,
+            "chat_id": id,
+            "message_id": message_id,
+        },
+        False,
+    )
+
+    if event_emitter:
+        await event_emitter(
+            {
+                "type": "chat:message",
+                "data": {
+                    "chat_id": id,
+                    "message_id": message_id,
+                    "content": form_data.content,
+                },
+            }
+        )
+
+    return ChatResponse(**chat.model_dump())
+
+
+############################
+# SendChatMessageEventById
+############################
+class EventForm(BaseModel):
+    type: str
+    data: dict
+
+
+@router.post("/{id}/messages/{message_id}/event", response_model=Optional[bool])
+async def send_chat_message_event_by_id(
+    id: str, message_id: str, form_data: EventForm, user=Depends(get_verified_user)
+):
+    chat = Chats.get_chat_by_id(id)
+
+    if not chat:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
+        )
+
+    if chat.user_id != user.id and user.role != "admin":
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
+        )
+
+    event_emitter = get_event_emitter(
+        {
+            "user_id": user.id,
+            "chat_id": id,
+            "message_id": message_id,
+        }
+    )
+
+    try:
+        if event_emitter:
+            await event_emitter(form_data.model_dump())
+        else:
+            return False
+        return True
+    except:
+        return False
+
+
 ############################
 # DeleteChatById
 ############################
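
A hedged sketch of calling the new message-update endpoint added above; the /api/v1/chats mount point and the bearer token are assumptions about the deployment, not part of this diff.

    import requests

    resp = requests.post(
        "http://localhost:8080/api/v1/chats/CHAT_ID/messages/MESSAGE_ID",
        headers={"Authorization": "Bearer TOKEN"},
        json={"content": "Edited message content"},
    )
    resp.raise_for_status()  # connected clients also receive a chat:message event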

+ 1 - 1
backend/open_webui/routers/files.py

@@ -81,7 +81,7 @@ def upload_file(
                     ProcessFileForm(file_id=id, content=result.get("text", "")),
                     user=user,
                 )
-            else:
+            elif file.content_type not in ["image/png", "image/jpeg", "image/gif"]:
                 process_file(request, ProcessFileForm(file_id=id), user=user)

             file_item = Files.get_file_by_id(id=id)

+ 2 - 4
backend/open_webui/routers/images.py

@@ -517,10 +517,8 @@ async def image_generations(
             images = []

             for image in res["data"]:
-                if "url" in image:
-                    image_data, content_type = load_url_image_data(
-                        image["url"], headers
-                    )
+                if image_url := image.get("url", None):
+                    image_data, content_type = load_url_image_data(image_url, headers)
                 else:
                     image_data, content_type = load_b64_image_data(image["b64_json"])

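
For reference, the assignment expression used above binds and tests in one step; a tiny illustrative example with placeholder data:

    image = {"b64_json": "placeholder"}
    if image_url := image.get("url", None):
        print("fetch", image_url)          # URL present and non-empty
    else:
        print("decode base64 payload")      # fall back to b64_json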

+ 17 - 7
backend/open_webui/routers/knowledge.py

@@ -437,14 +437,24 @@ def remove_file_from_knowledge_by_id(
         )

     # Remove content from the vector database
-    VECTOR_DB_CLIENT.delete(
-        collection_name=knowledge.id, filter={"file_id": form_data.file_id}
-    )
+    try:
+        VECTOR_DB_CLIENT.delete(
+            collection_name=knowledge.id, filter={"file_id": form_data.file_id}
+        )
+    except Exception as e:
+        log.debug("This was most likely caused by bypassing embedding processing")
+        log.debug(e)
+        pass

-    # Remove the file's collection from vector database
-    file_collection = f"file-{form_data.file_id}"
-    if VECTOR_DB_CLIENT.has_collection(collection_name=file_collection):
-        VECTOR_DB_CLIENT.delete_collection(collection_name=file_collection)
+    try:
+        # Remove the file's collection from vector database
+        file_collection = f"file-{form_data.file_id}"
+        if VECTOR_DB_CLIENT.has_collection(collection_name=file_collection):
+            VECTOR_DB_CLIENT.delete_collection(collection_name=file_collection)
+    except Exception as e:
+        log.debug("This was most likely caused by bypassing embedding processing")
+        log.debug(e)
+        pass

     # Delete file from database
     Files.delete_file_by_id(form_data.file_id)

+ 7 - 2
backend/open_webui/routers/ollama.py

@@ -295,7 +295,7 @@ async def update_config(
     }


-@cached(ttl=3)
+@cached(ttl=1)
 async def get_all_models(request: Request, user: UserModel = None):
     log.info("get_all_models()")
     if request.app.state.config.ENABLE_OLLAMA_API:
@@ -336,6 +336,7 @@ async def get_all_models(request: Request, user: UserModel = None):
                 )

                 prefix_id = api_config.get("prefix_id", None)
+                tags = api_config.get("tags", [])
                 model_ids = api_config.get("model_ids", [])

                 if len(model_ids) != 0 and "models" in response:
@@ -350,6 +351,10 @@ async def get_all_models(request: Request, user: UserModel = None):
                     for model in response.get("models", []):
                         model["model"] = f"{prefix_id}.{model['model']}"

+                if tags:
+                    for model in response.get("models", []):
+                        model["tags"] = tags
+
         def merge_models_lists(model_lists):
             merged_models = {}

@@ -1164,7 +1169,7 @@ async def generate_chat_completion(
     prefix_id = api_config.get("prefix_id", None)
     if prefix_id:
         payload["model"] = payload["model"].replace(f"{prefix_id}.", "")
-
+    # payload["keep_alive"] = -1 # keep alive forever
     return await send_post_request(
         url=f"{url}/api/chat",
         payload=json.dumps(payload),
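
A sketch of the per-connection api_config consumed above; only the keys come from the diff, the values are placeholders and the exact tag shape follows whatever the admin UI stores.

    api_config = {"prefix_id": "local", "tags": ["on-prem"], "model_ids": []}
    response = {"models": [{"model": "llama3:8b"}]}
    if api_config.get("tags", []):
        for model in response.get("models", []):
            model["tags"] = api_config["tags"]
    # every model advertised by this connection now carries the same tags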

+ 32 - 13
backend/open_webui/routers/openai.py

@@ -36,6 +36,9 @@ from open_webui.utils.payload import (
     apply_model_params_to_body_openai,
     apply_model_system_prompt_to_body,
 )
+from open_webui.utils.misc import (
+    convert_logit_bias_input_to_json,
+)

 from open_webui.utils.auth import get_admin_user, get_verified_user
 from open_webui.utils.access_control import has_access
@@ -350,6 +353,7 @@ async def get_all_models_responses(request: Request, user: UserModel) -> list:
             )

             prefix_id = api_config.get("prefix_id", None)
+            tags = api_config.get("tags", [])

             if prefix_id:
                 for model in (
@@ -357,6 +361,12 @@ async def get_all_models_responses(request: Request, user: UserModel) -> list:
                 ):
                     model["id"] = f"{prefix_id}.{model['id']}"

+            if tags:
+                for model in (
+                    response if isinstance(response, list) else response.get("data", [])
+                ):
+                    model["tags"] = tags
+
     log.debug(f"get_all_models:responses() {responses}")
     return responses

@@ -374,7 +384,7 @@ async def get_filtered_models(models, user):
     return filtered_models


-@cached(ttl=3)
+@cached(ttl=1)
 async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
     log.info("get_all_models()")

@@ -396,6 +406,7 @@ async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:

         for idx, models in enumerate(model_lists):
             if models is not None and "error" not in models:
+
                 merged_list.extend(
                     [
                         {
@@ -406,18 +417,21 @@ async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
                             "urlIdx": idx,
                         }
                         for model in models
-                        if "api.openai.com"
-                        not in request.app.state.config.OPENAI_API_BASE_URLS[idx]
-                        or not any(
-                            name in model["id"]
-                            for name in [
-                                "babbage",
-                                "dall-e",
-                                "davinci",
-                                "embedding",
-                                "tts",
-                                "whisper",
-                            ]
+                        if (model.get("id") or model.get("name"))
+                        and (
+                            "api.openai.com"
+                            not in request.app.state.config.OPENAI_API_BASE_URLS[idx]
+                            or not any(
+                                name in model["id"]
+                                for name in [
+                                    "babbage",
+                                    "dall-e",
+                                    "davinci",
+                                    "embedding",
+                                    "tts",
+                                    "whisper",
+                                ]
+                            )
                         )
                         )
                     ]
                 )
@@ -666,6 +680,11 @@ async def generate_chat_completion(
         del payload["max_tokens"]

     # Convert the modified body back to JSON
+    if "logit_bias" in payload:
+        payload["logit_bias"] = json.loads(
+            convert_logit_bias_input_to_json(payload["logit_bias"])
+        )
+
     payload = json.dumps(payload)

     r = None
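
Restating the new model filter above as a standalone predicate (illustrative only): a model is kept when it has an id or name, and OpenAI's non-chat models are hidden only for api.openai.com connections.

    def keep_model(model: dict, base_url: str) -> bool:
        if not (model.get("id") or model.get("name")):
            return False
        if "api.openai.com" not in base_url:
            return True
        blocked = ["babbage", "dall-e", "davinci", "embedding", "tts", "whisper"]
        return not any(name in model["id"] for name in blocked)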

+ 2 - 2
backend/open_webui/routers/pipelines.py

@@ -90,8 +90,8 @@ async def process_pipeline_inlet_filter(request, payload, user, models):
                     headers=headers,
                     json=request_data,
                 ) as response:
-                    response.raise_for_status()
                     payload = await response.json()
+                    response.raise_for_status()
             except aiohttp.ClientResponseError as e:
                 res = (
                     await response.json()
@@ -139,8 +139,8 @@ async def process_pipeline_outlet_filter(request, payload, user, models):
                     headers=headers,
                     json=request_data,
                 ) as response:
-                    response.raise_for_status()
                     payload = await response.json()
+                    response.raise_for_status()
             except aiohttp.ClientResponseError as e:
                 try:
                     res = (
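
Why the reorder matters, as a self-contained sketch (not project code): reading the body before raise_for_status() keeps the server's error payload available to the exception handler.

    import aiohttp

    async def post_and_keep_error_body(url: str, data: dict) -> dict:
        async with aiohttp.ClientSession() as session:
            async with session.post(url, json=data) as response:
                payload = await response.json()  # read while the connection is open
                response.raise_for_status()      # raises ClientResponseError on 4xx/5xx
                return payload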

+ 7 - 0
backend/open_webui/routers/retrieval.py

@@ -358,6 +358,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)):
         "content_extraction": {
             "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE,
             "tika_server_url": request.app.state.config.TIKA_SERVER_URL,
+            "docling_server_url": request.app.state.config.DOCLING_SERVER_URL,
             "document_intelligence_config": {
                 "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
                 "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,
@@ -428,6 +429,7 @@ class DocumentIntelligenceConfigForm(BaseModel):
 class ContentExtractionConfig(BaseModel):
     engine: str = ""
     tika_server_url: Optional[str] = None
+    docling_server_url: Optional[str] = None
     document_intelligence_config: Optional[DocumentIntelligenceConfigForm] = None


@@ -540,6 +542,9 @@ async def update_rag_config(
         request.app.state.config.TIKA_SERVER_URL = (
             form_data.content_extraction.tika_server_url
         )
+        request.app.state.config.DOCLING_SERVER_URL = (
+            form_data.content_extraction.docling_server_url
+        )
         if form_data.content_extraction.document_intelligence_config is not None:
             request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT = (
                 form_data.content_extraction.document_intelligence_config.endpoint
@@ -648,6 +653,7 @@ async def update_rag_config(
         "content_extraction": {
             "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE,
             "tika_server_url": request.app.state.config.TIKA_SERVER_URL,
+            "docling_server_url": request.app.state.config.DOCLING_SERVER_URL,
             "document_intelligence_config": {
                 "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
                 "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,
@@ -994,6 +1000,7 @@ def process_file(
                 loader = Loader(
                     engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE,
                     TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL,
+                    DOCLING_SERVER_URL=request.app.state.config.DOCLING_SERVER_URL,
                     PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES,
                     DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
                     DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,

+ 13 - 5
backend/open_webui/routers/users.py

@@ -2,6 +2,7 @@ import logging
 from typing import Optional

 from open_webui.models.auths import Auths
+from open_webui.models.groups import Groups
 from open_webui.models.chats import Chats
 from open_webui.models.users import (
     UserModel,
@@ -17,7 +18,10 @@ from open_webui.constants import ERROR_MESSAGES
 from open_webui.env import SRC_LOG_LEVELS
 from fastapi import APIRouter, Depends, HTTPException, Request, status
 from pydantic import BaseModel
+
 from open_webui.utils.auth import get_admin_user, get_password_hash, get_verified_user
+from open_webui.utils.access_control import get_permissions
+

 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])
@@ -45,7 +49,7 @@ async def get_users(

 @router.get("/groups")
 async def get_user_groups(user=Depends(get_verified_user)):
-    return Users.get_user_groups(user.id)
+    return Groups.get_groups_by_member_id(user.id)


 ############################
@@ -54,8 +58,12 @@ async def get_user_groups(user=Depends(get_verified_user)):


 @router.get("/permissions")
-async def get_user_permissisions(user=Depends(get_verified_user)):
-    return Users.get_user_groups(user.id)
+async def get_user_permissisions(request: Request, user=Depends(get_verified_user)):
+    user_permissions = get_permissions(
+        user.id, request.app.state.config.USER_PERMISSIONS
+    )
+
+    return user_permissions


 ############################
@@ -89,7 +97,7 @@ class UserPermissions(BaseModel):


 @router.get("/default/permissions", response_model=UserPermissions)
-async def get_user_permissions(request: Request, user=Depends(get_admin_user)):
+async def get_default_user_permissions(request: Request, user=Depends(get_admin_user)):
     return {
         "workspace": WorkspacePermissions(
             **request.app.state.config.USER_PERMISSIONS.get("workspace", {})
@@ -104,7 +112,7 @@ async def get_user_permissions(request: Request, user=Depends(get_admin_user)):


 @router.post("/default/permissions")
-async def update_user_permissions(
+async def update_default_user_permissions(
     request: Request, form_data: UserPermissions, user=Depends(get_admin_user)
 ):
     request.app.state.config.USER_PERMISSIONS = form_data.model_dump()
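
A hedged sketch of the updated /users/permissions endpoint from a client's point of view; the /api/v1/users prefix and the token are assumptions, and only the "workspace" key is confirmed by this diff.

    import requests

    perms = requests.get(
        "http://localhost:8080/api/v1/users/permissions",
        headers={"Authorization": "Bearer TOKEN"},
    ).json()
    print(perms.get("workspace"))  # effective permissions for the calling user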

+ 45 - 36
backend/open_webui/socket/main.py

@@ -269,11 +269,19 @@ async def disconnect(sid):
         # print(f"Unknown session ID {sid} disconnected")


-def get_event_emitter(request_info):
+def get_event_emitter(request_info, update_db=True):
     async def __event_emitter__(event_data):
         user_id = request_info["user_id"]
+
         session_ids = list(
-            set(USER_POOL.get(user_id, []) + [request_info["session_id"]])
+            set(
+                USER_POOL.get(user_id, [])
+                + (
+                    [request_info.get("session_id")]
+                    if request_info.get("session_id")
+                    else []
+                )
+            )
         )

         for session_id in session_ids:
@@ -287,40 +295,41 @@ def get_event_emitter(request_info):
                 to=session_id,
             )

-        if "type" in event_data and event_data["type"] == "status":
-            Chats.add_message_status_to_chat_by_id_and_message_id(
-                request_info["chat_id"],
-                request_info["message_id"],
-                event_data.get("data", {}),
-            )
-
-        if "type" in event_data and event_data["type"] == "message":
-            message = Chats.get_message_by_id_and_message_id(
-                request_info["chat_id"],
-                request_info["message_id"],
-            )
-
-            content = message.get("content", "")
-            content += event_data.get("data", {}).get("content", "")
-
-            Chats.upsert_message_to_chat_by_id_and_message_id(
-                request_info["chat_id"],
-                request_info["message_id"],
-                {
-                    "content": content,
-                },
-            )
-
-        if "type" in event_data and event_data["type"] == "replace":
-            content = event_data.get("data", {}).get("content", "")
-
-            Chats.upsert_message_to_chat_by_id_and_message_id(
-                request_info["chat_id"],
-                request_info["message_id"],
-                {
-                    "content": content,
-                },
-            )
+        if update_db:
+            if "type" in event_data and event_data["type"] == "status":
+                Chats.add_message_status_to_chat_by_id_and_message_id(
+                    request_info["chat_id"],
+                    request_info["message_id"],
+                    event_data.get("data", {}),
+                )
+
+            if "type" in event_data and event_data["type"] == "message":
+                message = Chats.get_message_by_id_and_message_id(
+                    request_info["chat_id"],
+                    request_info["message_id"],
+                )
+
+                content = message.get("content", "")
+                content += event_data.get("data", {}).get("content", "")
+
+                Chats.upsert_message_to_chat_by_id_and_message_id(
+                    request_info["chat_id"],
+                    request_info["message_id"],
+                    {
+                        "content": content,
+                    },
+                )
+
+            if "type" in event_data and event_data["type"] == "replace":
+                content = event_data.get("data", {}).get("content", "")
+
+                Chats.upsert_message_to_chat_by_id_and_message_id(
+                    request_info["chat_id"],
+                    request_info["message_id"],
+                    {
+                        "content": content,
+                    },
+                )

     return __event_emitter__

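
A small sketch of the new update_db flag: passing False emits the socket event without writing anything back to the chat record, which is how the chats router above uses it. The ids are placeholders.

    async def notify(user_id: str, chat_id: str, message_id: str):
        event_emitter = get_event_emitter(
            {"user_id": user_id, "chat_id": chat_id, "message_id": message_id},
            False,  # update_db=False: emit only, do not persist to the chat
        )
        await event_emitter({"type": "chat:message", "data": {"content": "hello"}})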

+ 1 - 0
backend/open_webui/utils/filter.py

@@ -106,6 +106,7 @@ async def process_filter_functions(

     # Handle file cleanup for inlet
     if skip_files and "files" in form_data.get("metadata", {}):
+        del form_data["files"]
         del form_data["metadata"]["files"]

     return form_data, {}

+ 76 - 25
backend/open_webui/utils/middleware.py

@@ -100,7 +100,7 @@ log.setLevel(SRC_LOG_LEVELS["MAIN"])


 async def chat_completion_tools_handler(
-    request: Request, body: dict, user: UserModel, models, tools
+    request: Request, body: dict, extra_params: dict, user: UserModel, models, tools
 ) -> tuple[dict, dict]:
     async def get_content_from_response(response) -> Optional[str]:
         content = None
@@ -135,6 +135,9 @@ async def chat_completion_tools_handler(
             "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
         }

+    event_caller = extra_params["__event_call__"]
+    metadata = extra_params["__metadata__"]
+
     task_model_id = get_task_model_id(
         body["model"],
         request.app.state.config.TASK_MODEL,
@@ -189,19 +192,33 @@ async def chat_completion_tools_handler(
                 tool_function_params = tool_call.get("parameters", {})

                 try:
-                    required_params = (
-                        tools[tool_function_name]
-                        .get("spec", {})
-                        .get("parameters", {})
-                        .get("required", [])
+                    tool = tools[tool_function_name]
+
+                    spec = tool.get("spec", {})
+                    allowed_params = (
+                        spec.get("parameters", {}).get("properties", {}).keys()
                     )
-                    tool_function = tools[tool_function_name]["callable"]
+                    tool_function = tool["callable"]
                     tool_function_params = {
                         k: v
                         for k, v in tool_function_params.items()
-                        if k in required_params
+                        if k in allowed_params
                     }
-                    tool_output = await tool_function(**tool_function_params)
+
+                    if tool.get("direct", False):
+                        tool_output = await tool_function(**tool_function_params)
+                    else:
+                        tool_output = await event_caller(
+                            {
+                                "type": "execute:tool",
+                                "data": {
+                                    "id": str(uuid4()),
+                                    "tool": tool,
+                                    "params": tool_function_params,
+                                    "session_id": metadata.get("session_id", None),
+                                },
+                            }
+                        )

                 except Exception as e:
                     tool_output = str(e)
@@ -767,12 +784,18 @@ async def process_chat_payload(request, form_data, user, metadata, model):
     }
     form_data["metadata"] = metadata

+    # Server side tools
     tool_ids = metadata.get("tool_ids", None)
+    # Client side tools
+    tool_specs = form_data.get("tool_specs", None)
+
     log.debug(f"{tool_ids=}")
+    log.debug(f"{tool_specs=}")
+
+    tools_dict = {}

     if tool_ids:
-        # If tool_ids field is present, then get the tools
-        tools = get_tools(
+        tools_dict = get_tools(
             request,
             tool_ids,
             user,
@@ -783,20 +806,30 @@ async def process_chat_payload(request, form_data, user, metadata, model):
                 "__files__": metadata.get("files", []),
             },
         )
-        log.info(f"{tools=}")
+        log.info(f"{tools_dict=}")
+
+    if tool_specs:
+        for tool in tool_specs:
+            callable = tool.pop("callable", None)
+            tools_dict[tool["name"]] = {
+                "direct": True,
+                "callable": callable,
+                "spec": tool,
+            }

+    if tools_dict:
         if metadata.get("function_calling") == "native":
             # If the function calling is native, then call the tools function calling handler
-            metadata["tools"] = tools
+            metadata["tools"] = tools_dict
             form_data["tools"] = [
                 {"type": "function", "function": tool.get("spec", {})}
-                for tool in tools.values()
+                for tool in tools_dict.values()
             ]
         else:
             # If the function calling is not native, then call the tools function calling handler
             try:
                 form_data, flags = await chat_completion_tools_handler(
-                    request, form_data, user, models, tools_dict
+                    request, form_data, extra_params, user, models, tools_dict
                 )
                 sources.extend(flags.get("sources", []))

@@ -815,7 +848,7 @@ async def process_chat_payload(request, form_data, user, metadata, model):
         for source_idx, source in enumerate(sources):
         for source_idx, source in enumerate(sources):
             if "document" in source:
             if "document" in source:
                 for doc_idx, doc_context in enumerate(source["document"]):
                 for doc_idx, doc_context in enumerate(source["document"]):
-                    context_string += f"<source><source_id>{source_idx}</source_id><source_context>{doc_context}</source_context></source>\n"
+                    context_string += f"<source><source_id>{source_idx + 1}</source_id><source_context>{doc_context}</source_context></source>\n"
 
 
         context_string = context_string.strip()
         context_string = context_string.strip()
         prompt = get_last_user_message(form_data["messages"])
         prompt = get_last_user_message(form_data["messages"])
@@ -1082,8 +1115,6 @@ async def process_chat_response(
         for filter_id in get_sorted_filter_ids(model)
         for filter_id in get_sorted_filter_ids(model)
     ]
     ]
 
 
-    print(f"{filter_functions=}")
-
     # Streaming response
     # Streaming response
     if event_emitter and event_caller:
     if event_emitter and event_caller:
         task_id = str(uuid4())  # Create a unique task ID.
         task_id = str(uuid4())  # Create a unique task ID.
@@ -1563,7 +1594,9 @@ async def process_chat_response(
 
 
                                     value = delta.get("content")
                                     value = delta.get("content")
 
 
-                                    reasoning_content = delta.get("reasoning_content")
+                                    reasoning_content = delta.get(
+                                        "reasoning_content"
+                                    ) or delta.get("reasoning")
                                     if reasoning_content:
                                     if reasoning_content:
                                         if (
                                         if (
                                             not content_blocks
                                             not content_blocks
@@ -1766,18 +1799,36 @@ async def process_chat_response(
                             spec = tool.get("spec", {})
                             spec = tool.get("spec", {})
 
 
                             try:
                             try:
-                                required_params = spec.get("parameters", {}).get(
-                                    "required", []
+                                allowed_params = (
+                                    spec.get("parameters", {})
+                                    .get("properties", {})
+                                    .keys()
                                 )
                                 )
                                 tool_function = tool["callable"]
                                 tool_function = tool["callable"]
                                 tool_function_params = {
                                 tool_function_params = {
                                     k: v
                                     k: v
                                     for k, v in tool_function_params.items()
                                     for k, v in tool_function_params.items()
-                                    if k in required_params
+                                    if k in allowed_params
                                 }
                                 }
-                                tool_result = await tool_function(
-                                    **tool_function_params
-                                )
+
+                                if tool.get("direct", False):
+                                    tool_result = await tool_function(
+                                        **tool_function_params
+                                    )
+                                else:
+                                    tool_result = await event_caller(
+                                        {
+                                            "type": "execute:tool",
+                                            "data": {
+                                                "id": str(uuid4()),
+                                                "tool": tool,
+                                                "params": tool_function_params,
+                                                "session_id": metadata.get(
+                                                    "session_id", None
+                                                ),
+                                            },
+                                        }
+                                    )
                             except Exception as e:
                             except Exception as e:
                                 tool_result = str(e)
                                 tool_result = str(e)
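
Both dispatch sites above follow the same pattern: server-side tools resolved from tool_ids and client-supplied tool_specs are merged into one tools_dict, call arguments are filtered down to the parameters the spec declares, and the "direct" flag decides whether the stored callable is awaited in-process or the call is delegated through the event caller as an "execute:tool" event. A minimal, self-contained sketch of that dispatch; the tool names, specs and the event_caller stub are illustrative stand-ins, not part of the codebase:

import asyncio
from uuid import uuid4

# Illustrative tool; real entries come from get_tools() and the client tool_specs.
async def add(a: int, b: int) -> int:
    return a + b

async def event_caller(event: dict):
    # Stand-in for the session event caller: the real one asks the connected
    # client to execute the tool and returns its result.
    return f"delegated {event['data']['tool']['spec']['name']} to the client"

tools_dict = {
    "add": {
        "direct": True,
        "callable": add,
        "spec": {"name": "add", "parameters": {"properties": {"a": {}, "b": {}}}},
    },
    "web_search": {
        "direct": False,
        "callable": None,
        "spec": {"name": "web_search", "parameters": {"properties": {"query": {}}}},
    },
}

async def dispatch(name: str, params: dict, session_id=None):
    tool = tools_dict[name]
    # Same filtering as above: keep only parameters declared in the spec.
    allowed = tool["spec"].get("parameters", {}).get("properties", {}).keys()
    params = {k: v for k, v in params.items() if k in allowed}
    if tool.get("direct", False):
        return await tool["callable"](**params)
    return await event_caller(
        {
            "type": "execute:tool",
            "data": {"id": str(uuid4()), "tool": tool, "params": params, "session_id": session_id},
        }
    )

print(asyncio.run(dispatch("add", {"a": 1, "b": 2, "extra": "dropped"})))
print(asyncio.run(dispatch("web_search", {"query": "open webui"})))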
 
 

+ 1 - 0
backend/open_webui/utils/models.py

@@ -49,6 +49,7 @@ async def get_all_base_models(request: Request, user: UserModel = None):
                 "created": int(time.time()),
                 "created": int(time.time()),
                 "owned_by": "ollama",
                 "owned_by": "ollama",
                 "ollama": model,
                 "ollama": model,
+                "tags": model.get("tags", []),
             }
             }
             for model in ollama_models["models"]
             for model in ollama_models["models"]
         ]
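
With the extra field, each Ollama-backed entry in the base model list now carries whatever tags the upstream model record provides, defaulting to an empty list. A rough sketch of one entry; fields other than those shown in the hunk are assumptions for illustration:

import time

model = {"name": "llama3:8b", "model": "llama3:8b", "tags": [{"name": "local"}]}

entry = {
    "id": model["model"],            # assumed field, not shown in the hunk
    "object": "model",               # assumed field, not shown in the hunk
    "created": int(time.time()),
    "owned_by": "ollama",
    "ollama": model,
    "tags": model.get("tags", []),   # new: falls back to [] when absent
}
print(entry["tags"])  # [{'name': 'local'}]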
         ]

+ 8 - 5
backend/open_webui/utils/oauth.py

@@ -94,7 +94,7 @@ class OAuthManager:
             oauth_claim = auth_manager_config.OAUTH_ROLES_CLAIM
             oauth_claim = auth_manager_config.OAUTH_ROLES_CLAIM
             oauth_allowed_roles = auth_manager_config.OAUTH_ALLOWED_ROLES
             oauth_allowed_roles = auth_manager_config.OAUTH_ALLOWED_ROLES
             oauth_admin_roles = auth_manager_config.OAUTH_ADMIN_ROLES
             oauth_admin_roles = auth_manager_config.OAUTH_ADMIN_ROLES
-            oauth_roles = None
+            oauth_roles = []
             # Default/fallback role if no matching roles are found
             # Default/fallback role if no matching roles are found
             role = auth_manager_config.DEFAULT_USER_ROLE
             role = auth_manager_config.DEFAULT_USER_ROLE
 
 
@@ -104,7 +104,7 @@ class OAuthManager:
                 nested_claims = oauth_claim.split(".")
                 nested_claims = oauth_claim.split(".")
                 for nested_claim in nested_claims:
                 for nested_claim in nested_claims:
                     claim_data = claim_data.get(nested_claim, {})
                     claim_data = claim_data.get(nested_claim, {})
-                oauth_roles = claim_data if isinstance(claim_data, list) else None
+                oauth_roles = claim_data if isinstance(claim_data, list) else []
 
 
             log.debug(f"Oauth Roles claim: {oauth_claim}")
             log.debug(f"Oauth Roles claim: {oauth_claim}")
             log.debug(f"User roles from oauth: {oauth_roles}")
             log.debug(f"User roles from oauth: {oauth_roles}")
@@ -140,6 +140,7 @@ class OAuthManager:
         log.debug("Running OAUTH Group management")
         log.debug("Running OAUTH Group management")
         oauth_claim = auth_manager_config.OAUTH_GROUPS_CLAIM
         oauth_claim = auth_manager_config.OAUTH_GROUPS_CLAIM
 
 
+        user_oauth_groups = []
         # Nested claim search for groups claim
         # Nested claim search for groups claim
         if oauth_claim:
         if oauth_claim:
             claim_data = user_data
             claim_data = user_data
@@ -160,7 +161,7 @@ class OAuthManager:
 
 
         # Remove groups that user is no longer a part of
         # Remove groups that user is no longer a part of
         for group_model in user_current_groups:
         for group_model in user_current_groups:
-            if group_model.name not in user_oauth_groups:
+            if user_oauth_groups and group_model.name not in user_oauth_groups:
                 # Remove group from user
                 # Remove group from user
                 log.debug(
                 log.debug(
                     f"Removing user from group {group_model.name} as it is no longer in their oauth groups"
                     f"Removing user from group {group_model.name} as it is no longer in their oauth groups"
@@ -186,8 +187,10 @@ class OAuthManager:
 
 
         # Add user to new groups
         # Add user to new groups
         for group_model in all_available_groups:
         for group_model in all_available_groups:
-            if group_model.name in user_oauth_groups and not any(
-                gm.name == group_model.name for gm in user_current_groups
+            if (
+                user_oauth_groups
+                and group_model.name in user_oauth_groups
+                and not any(gm.name == group_model.name for gm in user_current_groups)
             ):
             ):
                 # Add user to group
                 # Add user to group
                 log.debug(
                 log.debug(
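
Defaulting oauth_roles and user_oauth_groups to empty lists keeps the later membership checks valid when the claim is missing or malformed: the nested-claim walk degrades to an empty result instead of None. A small illustration of that walk, using a made-up claim path:

def resolve_list_claim(user_data: dict, claim_path: str) -> list:
    # Walk dotted claim names; missing keys collapse to {} and the final
    # value only counts if it is actually a list.
    claim_data = user_data
    for part in claim_path.split("."):
        claim_data = claim_data.get(part, {}) if isinstance(claim_data, dict) else {}
    return claim_data if isinstance(claim_data, list) else []

print(resolve_list_claim({"realm_access": {"roles": ["admin"]}}, "realm_access.roles"))  # ['admin']
print(resolve_list_claim({}, "realm_access.roles"))                                      # []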

+ 10 - 0
backend/open_webui/utils/payload.py

@@ -110,6 +110,11 @@ def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
         "num_thread": int,
         "num_thread": int,
     }
     }
 
 
+    # Extract keep_alive from options if it exists
+    if "options" in form_data and "keep_alive" in form_data["options"]:
+        form_data["keep_alive"] = form_data["options"]["keep_alive"]
+        del form_data["options"]["keep_alive"]
+
     return apply_model_params_to_body(params, form_data, mappings)
     return apply_model_params_to_body(params, form_data, mappings)
 
 
 
 
@@ -231,6 +236,11 @@ def convert_payload_openai_to_ollama(openai_payload: dict) -> dict:
                 "system"
                 "system"
             ]  # To prevent Ollama warning of invalid option provided
             ]  # To prevent Ollama warning of invalid option provided
 
 
+        # Extract keep_alive from options if it exists
+        if "keep_alive" in ollama_options:
+            ollama_payload["keep_alive"] = ollama_options["keep_alive"]
+            del ollama_options["keep_alive"]
+
     # If there is the "stop" parameter in the openai_payload, remap it to the ollama_payload.options
     # If there is the "stop" parameter in the openai_payload, remap it to the ollama_payload.options
     if "stop" in openai_payload:
     if "stop" in openai_payload:
         ollama_options = ollama_payload.get("options", {})
         ollama_options = ollama_payload.get("options", {})
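
Ollama expects keep_alive at the top level of the request rather than inside options, so both helpers now hoist it before the payload is forwarded. The effect, shown in isolation as a minimal sketch:

def hoist_keep_alive(payload: dict) -> dict:
    # Move options.keep_alive up a level so Ollama does not treat it as an
    # unknown model option.
    options = payload.get("options", {})
    if "keep_alive" in options:
        payload["keep_alive"] = options.pop("keep_alive")
    return payload

print(hoist_keep_alive({"model": "llama3", "options": {"keep_alive": "5m", "num_ctx": 4096}}))
# {'model': 'llama3', 'options': {'num_ctx': 4096}, 'keep_alive': '5m'}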

+ 10 - 6
backend/open_webui/utils/plugin.py

@@ -7,7 +7,7 @@ import types
 import tempfile
 import tempfile
 import logging
 import logging
 
 
-from open_webui.env import SRC_LOG_LEVELS
+from open_webui.env import SRC_LOG_LEVELS, PIP_OPTIONS, PIP_PACKAGE_INDEX_OPTIONS
 from open_webui.models.functions import Functions
 from open_webui.models.functions import Functions
 from open_webui.models.tools import Tools
 from open_webui.models.tools import Tools
 
 
@@ -165,15 +165,19 @@ def load_function_module_by_id(function_id, content=None):
         os.unlink(temp_file.name)
         os.unlink(temp_file.name)
 
 
 
 
-def install_frontmatter_requirements(requirements):
+def install_frontmatter_requirements(requirements: str):
     if requirements:
     if requirements:
         try:
         try:
             req_list = [req.strip() for req in requirements.split(",")]
             req_list = [req.strip() for req in requirements.split(",")]
-            for req in req_list:
-                log.info(f"Installing requirement: {req}")
-                subprocess.check_call([sys.executable, "-m", "pip", "install", req])
+            log.info(f"Installing requirements: {' '.join(req_list)}")
+            subprocess.check_call(
+                [sys.executable, "-m", "pip", "install"]
+                + PIP_OPTIONS
+                + req_list
+                + PIP_PACKAGE_INDEX_OPTIONS
+            )
         except Exception as e:
         except Exception as e:
-            log.error(f"Error installing package: {req}")
+            log.error(f"Error installing packages: {' '.join(req_list)}")
             raise e
             raise e
 
 
     else:
     else:
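
The frontmatter requirements string is now installed with a single pip invocation, wrapped by the PIP_OPTIONS and PIP_PACKAGE_INDEX_OPTIONS lists taken from the environment. A sketch of the command that gets built; the option values below are placeholders, not the project's defaults:

import sys

# Placeholder values; the real lists are derived from environment variables
# in open_webui.env.
PIP_OPTIONS = ["--no-cache-dir"]
PIP_PACKAGE_INDEX_OPTIONS = ["--index-url", "https://pypi.org/simple"]

requirements = "requests, beautifulsoup4>=4.12"
req_list = [req.strip() for req in requirements.split(",")]

command = [sys.executable, "-m", "pip", "install"] + PIP_OPTIONS + req_list + PIP_PACKAGE_INDEX_OPTIONS
print(" ".join(command))
# .../python -m pip install --no-cache-dir requests beautifulsoup4>=4.12 --index-url https://pypi.org/simple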

+ 0 - 0
backend/open_webui/utils/telemetry/__init__.py


+ 26 - 0
backend/open_webui/utils/telemetry/constants.py

@@ -0,0 +1,26 @@
+from opentelemetry.semconv.trace import SpanAttributes as _SpanAttributes
+
+# Span Tags
+SPAN_DB_TYPE = "mysql"
+SPAN_REDIS_TYPE = "redis"
+SPAN_DURATION = "duration"
+SPAN_SQL_STR = "sql"
+SPAN_SQL_EXPLAIN = "explain"
+SPAN_ERROR_TYPE = "error"
+
+
+class SpanAttributes(_SpanAttributes):
+    """
+    Span Attributes
+    """
+
+    DB_INSTANCE = "db.instance"
+    DB_TYPE = "db.type"
+    DB_IP = "db.ip"
+    DB_PORT = "db.port"
+    ERROR_KIND = "error.kind"
+    ERROR_OBJECT = "error.object"
+    ERROR_MESSAGE = "error.message"
+    RESULT_CODE = "result.code"
+    RESULT_MESSAGE = "result.message"
+    RESULT_ERRORS = "result.errors"
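
The subclass only extends the standard OpenTelemetry semantic-convention names with a few custom keys; they are set on spans like any other attribute. A short illustration, assuming the open_webui package is on the import path (the no-op tracer is used, so this runs even without a configured provider):

from opentelemetry import trace

from open_webui.utils.telemetry.constants import SpanAttributes

tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("redis GET") as span:
    span.set_attribute(SpanAttributes.DB_TYPE, "redis")        # custom key
    span.set_attribute(SpanAttributes.DB_INSTANCE, "cache/0")  # custom key
    span.set_attribute(SpanAttributes.DB_STATEMENT, "GET foo")  # inherited key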

+ 31 - 0
backend/open_webui/utils/telemetry/exporters.py

@@ -0,0 +1,31 @@
+import threading
+
+from opentelemetry.sdk.trace import ReadableSpan
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+
+class LazyBatchSpanProcessor(BatchSpanProcessor):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.done = True
+        with self.condition:
+            self.condition.notify_all()
+        self.worker_thread.join()
+        self.done = False
+        self.worker_thread = None
+
+    def on_end(self, span: ReadableSpan) -> None:
+        if self.worker_thread is None:
+            self.worker_thread = threading.Thread(
+                name=self.__class__.__name__, target=self.worker, daemon=True
+            )
+            self.worker_thread.start()
+        super().on_end(span)
+
+    def shutdown(self) -> None:
+        self.done = True
+        with self.condition:
+            self.condition.notify_all()
+        if self.worker_thread:
+            self.worker_thread.join()
+        self.span_exporter.shutdown()
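
The constructor tears down the worker thread that the parent BatchSpanProcessor starts, and on_end() re-creates it the first time a span actually finishes, so idle processes do not keep an export thread alive. A quick smoke test with the console exporter; this is not the app's wiring, which goes through setup.py below, and it assumes the open_webui package is importable:

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter

from open_webui.utils.telemetry.exporters import LazyBatchSpanProcessor

processor = LazyBatchSpanProcessor(ConsoleSpanExporter())
provider = TracerProvider()
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)

print(processor.worker_thread)  # None: no export thread yet
with trace.get_tracer(__name__).start_as_current_span("first-span"):
    pass
print(processor.worker_thread)  # worker thread started lazily by on_end()
processor.shutdown()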

+ 202 - 0
backend/open_webui/utils/telemetry/instrumentors.py

@@ -0,0 +1,202 @@
+import logging
+import traceback
+from typing import Collection, Union
+
+from aiohttp import (
+    TraceRequestStartParams,
+    TraceRequestEndParams,
+    TraceRequestExceptionParams,
+)
+from chromadb.telemetry.opentelemetry.fastapi import instrument_fastapi
+from fastapi import FastAPI
+from opentelemetry.instrumentation.httpx import (
+    HTTPXClientInstrumentor,
+    RequestInfo,
+    ResponseInfo,
+)
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.logging import LoggingInstrumentor
+from opentelemetry.instrumentation.redis import RedisInstrumentor
+from opentelemetry.instrumentation.requests import RequestsInstrumentor
+from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
+from opentelemetry.instrumentation.aiohttp_client import AioHttpClientInstrumentor
+from opentelemetry.trace import Span, StatusCode
+from redis import Redis
+from requests import PreparedRequest, Response
+from sqlalchemy import Engine
+from fastapi import status
+
+from open_webui.utils.telemetry.constants import SPAN_REDIS_TYPE, SpanAttributes
+
+from open_webui.env import SRC_LOG_LEVELS
+
+logger = logging.getLogger(__name__)
+logger.setLevel(SRC_LOG_LEVELS["MAIN"])
+
+
+def requests_hook(span: Span, request: PreparedRequest):
+    """
+    Http Request Hook
+    """
+
+    span.update_name(f"{request.method} {request.url}")
+    span.set_attributes(
+        attributes={
+            SpanAttributes.HTTP_URL: request.url,
+            SpanAttributes.HTTP_METHOD: request.method,
+        }
+    )
+
+
+def response_hook(span: Span, request: PreparedRequest, response: Response):
+    """
+    HTTP Response Hook
+    """
+
+    span.set_attributes(
+        attributes={
+            SpanAttributes.HTTP_STATUS_CODE: response.status_code,
+        }
+    )
+    span.set_status(StatusCode.ERROR if response.status_code >= 400 else StatusCode.OK)
+
+
+def redis_request_hook(span: Span, instance: Redis, args, kwargs):
+    """
+    Redis Request Hook
+    """
+
+    try:
+        connection_kwargs: dict = instance.connection_pool.connection_kwargs
+        host = connection_kwargs.get("host")
+        port = connection_kwargs.get("port")
+        db = connection_kwargs.get("db")
+        span.set_attributes(
+            {
+                SpanAttributes.DB_INSTANCE: f"{host}/{db}",
+                SpanAttributes.DB_NAME: f"{host}/{db}",
+                SpanAttributes.DB_TYPE: SPAN_REDIS_TYPE,
+                SpanAttributes.DB_PORT: port,
+                SpanAttributes.DB_IP: host,
+                SpanAttributes.DB_STATEMENT: " ".join([str(i) for i in args]),
+                SpanAttributes.DB_OPERATION: str(args[0]),
+            }
+        )
+    except Exception:  # pylint: disable=W0718
+        logger.error(traceback.format_exc())
+
+
+def httpx_request_hook(span: Span, request: RequestInfo):
+    """
+    HTTPX Request Hook
+    """
+
+    span.update_name(f"{request.method.decode()} {str(request.url)}")
+    span.set_attributes(
+        attributes={
+            SpanAttributes.HTTP_URL: str(request.url),
+            SpanAttributes.HTTP_METHOD: request.method.decode(),
+        }
+    )
+
+
+def httpx_response_hook(span: Span, request: RequestInfo, response: ResponseInfo):
+    """
+    HTTPX Response Hook
+    """
+
+    span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, response.status_code)
+    span.set_status(
+        StatusCode.ERROR
+        if response.status_code >= status.HTTP_400_BAD_REQUEST
+        else StatusCode.OK
+    )
+
+
+async def httpx_async_request_hook(span: Span, request: RequestInfo):
+    """
+    Async Request Hook
+    """
+
+    httpx_request_hook(span, request)
+
+
+async def httpx_async_response_hook(
+    span: Span, request: RequestInfo, response: ResponseInfo
+):
+    """
+    Async Response Hook
+    """
+
+    httpx_response_hook(span, request, response)
+
+
+def aiohttp_request_hook(span: Span, request: TraceRequestStartParams):
+    """
+    Aiohttp Request Hook
+    """
+
+    span.update_name(f"{request.method} {str(request.url)}")
+    span.set_attributes(
+        attributes={
+            SpanAttributes.HTTP_URL: str(request.url),
+            SpanAttributes.HTTP_METHOD: request.method,
+        }
+    )
+
+
+def aiohttp_response_hook(
+    span: Span, response: Union[TraceRequestExceptionParams, TraceRequestEndParams]
+):
+    """
+    Aiohttp Response Hook
+    """
+
+    if isinstance(response, TraceRequestEndParams):
+        span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, response.response.status)
+        span.set_status(
+            StatusCode.ERROR
+            if response.response.status >= status.HTTP_400_BAD_REQUEST
+            else StatusCode.OK
+        )
+    elif isinstance(response, TraceRequestExceptionParams):
+        span.set_status(StatusCode.ERROR)
+        span.set_attribute(SpanAttributes.ERROR_MESSAGE, str(response.exception))
+
+
+class Instrumentor(BaseInstrumentor):
+    """
+    Instrument OT
+    """
+
+    def __init__(self, app: FastAPI, db_engine: Engine):
+        self.app = app
+        self.db_engine = db_engine
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return []
+
+    def _instrument(self, **kwargs):
+        instrument_fastapi(app=self.app)
+        SQLAlchemyInstrumentor().instrument(engine=self.db_engine)
+        RedisInstrumentor().instrument(request_hook=redis_request_hook)
+        RequestsInstrumentor().instrument(
+            request_hook=requests_hook, response_hook=response_hook
+        )
+        LoggingInstrumentor().instrument()
+        HTTPXClientInstrumentor().instrument(
+            request_hook=httpx_request_hook,
+            response_hook=httpx_response_hook,
+            async_request_hook=httpx_async_request_hook,
+            async_response_hook=httpx_async_response_hook,
+        )
+        AioHttpClientInstrumentor().instrument(
+            request_hook=aiohttp_request_hook,
+            response_hook=aiohttp_response_hook,
+        )
+
+    def _uninstrument(self, **kwargs):
+        if getattr(self, "instrumentors", None) is None:
+            return
+        for instrumentor in self.instrumentors:
+            instrumentor.uninstrument()

+ 23 - 0
backend/open_webui/utils/telemetry/setup.py

@@ -0,0 +1,23 @@
+from fastapi import FastAPI
+from opentelemetry import trace
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+from opentelemetry.sdk.trace import TracerProvider
+from sqlalchemy import Engine
+
+from open_webui.utils.telemetry.exporters import LazyBatchSpanProcessor
+from open_webui.utils.telemetry.instrumentors import Instrumentor
+from open_webui.env import OTEL_SERVICE_NAME, OTEL_EXPORTER_OTLP_ENDPOINT
+
+
+def setup(app: FastAPI, db_engine: Engine):
+    # set up trace
+    trace.set_tracer_provider(
+        TracerProvider(
+            resource=Resource.create(attributes={SERVICE_NAME: OTEL_SERVICE_NAME})
+        )
+    )
+    # otlp export
+    exporter = OTLPSpanExporter(endpoint=OTEL_EXPORTER_OTLP_ENDPOINT)
+    trace.get_tracer_provider().add_span_processor(LazyBatchSpanProcessor(exporter))
+    Instrumentor(app=app, db_engine=db_engine).instrument()
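
Hooking tracing up is a single call at startup. A sketch of how an application entry point might use it, assuming an OTLP/gRPC collector is listening at OTEL_EXPORTER_OTLP_ENDPOINT (both that and OTEL_SERVICE_NAME are read from the environment in open_webui.env); the SQLite URL below is only a stand-in for the app's real engine:

from fastapi import FastAPI
from sqlalchemy import create_engine

from open_webui.utils.telemetry.setup import setup as setup_telemetry

app = FastAPI()
engine = create_engine("sqlite:///webui.db")  # stand-in engine

# Registers the tracer provider, the lazy OTLP span processor and the library
# instrumentors (FastAPI, SQLAlchemy, Redis, requests, httpx, aiohttp, logging).
setup_telemetry(app=app, db_engine=engine)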

+ 3 - 0
backend/open_webui/utils/tools.py

@@ -1,6 +1,9 @@
 import inspect
 import inspect
 import logging
 import logging
 import re
 import re
+import inspect
+import uuid
+
 from typing import Any, Awaitable, Callable, get_type_hints
 from typing import Any, Awaitable, Callable, get_type_hints
 from functools import update_wrapper, partial
 from functools import update_wrapper, partial
 
 

+ 16 - 2
backend/requirements.txt

@@ -37,13 +37,13 @@ asgiref==3.8.1
 # AI libraries
 # AI libraries
 openai
 openai
 anthropic
 anthropic
-google-generativeai==0.7.2
+google-generativeai==0.8.4
 tiktoken
 tiktoken
 
 
 langchain==0.3.19
 langchain==0.3.19
 langchain-community==0.3.18
 langchain-community==0.3.18
 
 
-fake-useragent==1.5.1
+fake-useragent==2.1.0
 chromadb==0.6.2
 chromadb==0.6.2
 pymilvus==2.5.0
 pymilvus==2.5.0
 qdrant-client~=1.12.0
 qdrant-client~=1.12.0
@@ -78,6 +78,7 @@ sentencepiece
 soundfile==0.13.1
 soundfile==0.13.1
 azure-ai-documentintelligence==1.0.0
 azure-ai-documentintelligence==1.0.0
 
 
+pillow==11.1.0
 opencv-python-headless==4.11.0.86
 opencv-python-headless==4.11.0.86
 rapidocr-onnxruntime==1.3.24
 rapidocr-onnxruntime==1.3.24
 rank-bm25==0.2.2
 rank-bm25==0.2.2
@@ -118,3 +119,16 @@ ldap3==2.9.1
 
 
 ## Firecrawl
 ## Firecrawl
 firecrawl-py==1.12.0
 firecrawl-py==1.12.0
+
+## Trace
+opentelemetry-api==1.30.0
+opentelemetry-sdk==1.30.0
+opentelemetry-exporter-otlp==1.30.0
+opentelemetry-instrumentation==0.51b0
+opentelemetry-instrumentation-fastapi==0.51b0
+opentelemetry-instrumentation-sqlalchemy==0.51b0
+opentelemetry-instrumentation-redis==0.51b0
+opentelemetry-instrumentation-requests==0.51b0
+opentelemetry-instrumentation-logging==0.51b0
+opentelemetry-instrumentation-httpx==0.51b0
+opentelemetry-instrumentation-aiohttp-client==0.51b0

+ 2 - 1
backend/start_windows.bat

@@ -41,4 +41,5 @@ IF "%WEBUI_SECRET_KEY%%WEBUI_JWT_SECRET_KEY%" == " " (
 
 
 :: Execute uvicorn
 :: Execute uvicorn
 SET "WEBUI_SECRET_KEY=%WEBUI_SECRET_KEY%"
 SET "WEBUI_SECRET_KEY=%WEBUI_SECRET_KEY%"
-uvicorn open_webui.main:app --host "%HOST%" --port "%PORT%" --forwarded-allow-ips '*'
+uvicorn open_webui.main:app --host "%HOST%" --port "%PORT%" --forwarded-allow-ips '*' --ws auto
+:: For SSL use: uvicorn open_webui.main:app --host "%HOST%" --port "%PORT%" --forwarded-allow-ips '*' --ssl-keyfile "key.pem" --ssl-certfile "cert.pem" --ws auto

+ 18 - 8
package-lock.json

@@ -37,6 +37,7 @@
 				"file-saver": "^2.0.5",
 				"file-saver": "^2.0.5",
 				"fuse.js": "^7.0.0",
 				"fuse.js": "^7.0.0",
 				"highlight.js": "^11.9.0",
 				"highlight.js": "^11.9.0",
+				"html2canvas-pro": "^1.5.8",
 				"i18next": "^23.10.0",
 				"i18next": "^23.10.0",
 				"i18next-browser-languagedetector": "^7.2.0",
 				"i18next-browser-languagedetector": "^7.2.0",
 				"i18next-resources-to-backend": "^1.2.0",
 				"i18next-resources-to-backend": "^1.2.0",
@@ -59,7 +60,7 @@
 				"prosemirror-schema-list": "^1.4.1",
 				"prosemirror-schema-list": "^1.4.1",
 				"prosemirror-state": "^1.4.3",
 				"prosemirror-state": "^1.4.3",
 				"prosemirror-view": "^1.34.3",
 				"prosemirror-view": "^1.34.3",
-				"pyodide": "^0.27.2",
+				"pyodide": "^0.27.3",
 				"socket.io-client": "^4.2.0",
 				"socket.io-client": "^4.2.0",
 				"sortablejs": "^1.15.2",
 				"sortablejs": "^1.15.2",
 				"svelte-sonner": "^0.3.19",
 				"svelte-sonner": "^0.3.19",
@@ -3884,7 +3885,6 @@
 			"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz",
 			"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz",
 			"integrity": "sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==",
 			"integrity": "sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==",
 			"license": "MIT",
 			"license": "MIT",
-			"optional": true,
 			"engines": {
 			"engines": {
 				"node": ">= 0.6.0"
 				"node": ">= 0.6.0"
 			}
 			}
@@ -4759,7 +4759,6 @@
 			"resolved": "https://registry.npmjs.org/css-line-break/-/css-line-break-2.1.0.tgz",
 			"resolved": "https://registry.npmjs.org/css-line-break/-/css-line-break-2.1.0.tgz",
 			"integrity": "sha512-FHcKFCZcAha3LwfVBhCQbW2nCNbkZXn7KVUJcsT5/P8YmfsVja0FMPJr0B903j/E69HUphKiV9iQArX8SDYA4w==",
 			"integrity": "sha512-FHcKFCZcAha3LwfVBhCQbW2nCNbkZXn7KVUJcsT5/P8YmfsVja0FMPJr0B903j/E69HUphKiV9iQArX8SDYA4w==",
 			"license": "MIT",
 			"license": "MIT",
-			"optional": true,
 			"dependencies": {
 			"dependencies": {
 				"utrie": "^1.0.2"
 				"utrie": "^1.0.2"
 			}
 			}
@@ -6842,6 +6841,19 @@
 				"node": ">=8.0.0"
 				"node": ">=8.0.0"
 			}
 			}
 		},
 		},
+		"node_modules/html2canvas-pro": {
+			"version": "1.5.8",
+			"resolved": "https://registry.npmjs.org/html2canvas-pro/-/html2canvas-pro-1.5.8.tgz",
+			"integrity": "sha512-bVGAU7IvhBwBlRAmX6QhekX8lsaxmYoF6zIwf/HNlHscjx+KN8jw/U4PQRYqeEVm9+m13hcS1l5ChJB9/e29Lw==",
+			"license": "MIT",
+			"dependencies": {
+				"css-line-break": "^2.1.0",
+				"text-segmentation": "^1.0.3"
+			},
+			"engines": {
+				"node": ">=16.0.0"
+			}
+		},
 		"node_modules/htmlparser2": {
 		"node_modules/htmlparser2": {
 			"version": "8.0.2",
 			"version": "8.0.2",
 			"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz",
 			"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz",
@@ -9802,9 +9814,9 @@
 			}
 			}
 		},
 		},
 		"node_modules/pyodide": {
 		"node_modules/pyodide": {
-			"version": "0.27.2",
-			"resolved": "https://registry.npmjs.org/pyodide/-/pyodide-0.27.2.tgz",
-			"integrity": "sha512-sfA2kiUuQVRpWI4BYnU3sX5PaTTt/xrcVEmRzRcId8DzZXGGtPgCBC0gCqjUTUYSa8ofPaSjXmzESc86yvvCHg==",
+			"version": "0.27.3",
+			"resolved": "https://registry.npmjs.org/pyodide/-/pyodide-0.27.3.tgz",
+			"integrity": "sha512-6NwKEbPk0M3Wic2T1TCZijgZH9VE4RkHp1VGljS1sou0NjGdsmY2R/fG5oLmdDkjTRMI1iW7WYaY9pofX8gg1g==",
 			"license": "Apache-2.0",
 			"license": "Apache-2.0",
 			"dependencies": {
 			"dependencies": {
 				"ws": "^8.5.0"
 				"ws": "^8.5.0"
@@ -11472,7 +11484,6 @@
 			"resolved": "https://registry.npmjs.org/text-segmentation/-/text-segmentation-1.0.3.tgz",
 			"resolved": "https://registry.npmjs.org/text-segmentation/-/text-segmentation-1.0.3.tgz",
 			"integrity": "sha512-iOiPUo/BGnZ6+54OsWxZidGCsdU8YbE4PSpdPinp7DeMtUJNJBoJ/ouUSTJjHkh1KntHaltHl/gDs2FC4i5+Nw==",
 			"integrity": "sha512-iOiPUo/BGnZ6+54OsWxZidGCsdU8YbE4PSpdPinp7DeMtUJNJBoJ/ouUSTJjHkh1KntHaltHl/gDs2FC4i5+Nw==",
 			"license": "MIT",
 			"license": "MIT",
-			"optional": true,
 			"dependencies": {
 			"dependencies": {
 				"utrie": "^1.0.2"
 				"utrie": "^1.0.2"
 			}
 			}
@@ -11821,7 +11832,6 @@
 			"resolved": "https://registry.npmjs.org/utrie/-/utrie-1.0.2.tgz",
 			"resolved": "https://registry.npmjs.org/utrie/-/utrie-1.0.2.tgz",
 			"integrity": "sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw==",
 			"integrity": "sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw==",
 			"license": "MIT",
 			"license": "MIT",
-			"optional": true,
 			"dependencies": {
 			"dependencies": {
 				"base64-arraybuffer": "^1.0.2"
 				"base64-arraybuffer": "^1.0.2"
 			}
 			}

+ 2 - 1
package.json

@@ -80,6 +80,7 @@
 		"file-saver": "^2.0.5",
 		"file-saver": "^2.0.5",
 		"fuse.js": "^7.0.0",
 		"fuse.js": "^7.0.0",
 		"highlight.js": "^11.9.0",
 		"highlight.js": "^11.9.0",
+		"html2canvas-pro": "^1.5.8",
 		"i18next": "^23.10.0",
 		"i18next": "^23.10.0",
 		"i18next-browser-languagedetector": "^7.2.0",
 		"i18next-browser-languagedetector": "^7.2.0",
 		"i18next-resources-to-backend": "^1.2.0",
 		"i18next-resources-to-backend": "^1.2.0",
@@ -102,7 +103,7 @@
 		"prosemirror-schema-list": "^1.4.1",
 		"prosemirror-schema-list": "^1.4.1",
 		"prosemirror-state": "^1.4.3",
 		"prosemirror-state": "^1.4.3",
 		"prosemirror-view": "^1.34.3",
 		"prosemirror-view": "^1.34.3",
-		"pyodide": "^0.27.2",
+		"pyodide": "^0.27.3",
 		"socket.io-client": "^4.2.0",
 		"socket.io-client": "^4.2.0",
 		"sortablejs": "^1.15.2",
 		"sortablejs": "^1.15.2",
 		"svelte-sonner": "^0.3.19",
 		"svelte-sonner": "^0.3.19",

+ 2 - 1
pyproject.toml

@@ -51,7 +51,7 @@ dependencies = [
     "langchain==0.3.19",
     "langchain==0.3.19",
     "langchain-community==0.3.18",
     "langchain-community==0.3.18",
 
 
-    "fake-useragent==1.5.1",
+    "fake-useragent==2.1.0",
     "chromadb==0.6.2",
     "chromadb==0.6.2",
     "pymilvus==2.5.0",
     "pymilvus==2.5.0",
     "qdrant-client~=1.12.0",
     "qdrant-client~=1.12.0",
@@ -84,6 +84,7 @@ dependencies = [
     "soundfile==0.13.1",
     "soundfile==0.13.1",
     "azure-ai-documentintelligence==1.0.0",
     "azure-ai-documentintelligence==1.0.0",
 
 
+    "pillow==11.1.0",
     "opencv-python-headless==4.11.0.86",
     "opencv-python-headless==4.11.0.86",
     "rapidocr-onnxruntime==1.3.24",
     "rapidocr-onnxruntime==1.3.24",
     "rank-bm25==0.2.2",
     "rank-bm25==0.2.2",

+ 1 - 1
src/app.css

@@ -106,7 +106,7 @@ li p {
 }
 }
 
 
 ::-webkit-scrollbar {
 ::-webkit-scrollbar {
-	height: 0.4rem;
+	height: 0.8rem;
 	width: 0.4rem;
 	width: 0.4rem;
 }
 }
 
 

+ 7 - 0
src/lib/apis/index.ts

@@ -114,6 +114,13 @@ export const getModels = async (
 					}
 					}
 				}
 				}
 
 
+				const tags = apiConfig.tags;
+				if (tags) {
+					for (const model of models) {
+						model.tags = tags;
+					}
+				}
+
 				localModels = localModels.concat(models);
 				localModels = localModels.concat(models);
 			}
 			}
 		}
 		}

+ 32 - 4
src/lib/components/AddConnectionModal.svelte

@@ -14,6 +14,7 @@
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import Switch from '$lib/components/common/Switch.svelte';
 	import Switch from '$lib/components/common/Switch.svelte';
+	import Tags from './common/Tags.svelte';
 
 
 	export let onSubmit: Function = () => {};
 	export let onSubmit: Function = () => {};
 	export let onDelete: Function = () => {};
 	export let onDelete: Function = () => {};
@@ -31,6 +32,7 @@
 
 
 	let prefixId = '';
 	let prefixId = '';
 	let enable = true;
 	let enable = true;
+	let tags = [];
 
 
 	let modelId = '';
 	let modelId = '';
 	let modelIds = [];
 	let modelIds = [];
@@ -77,9 +79,9 @@
 	const submitHandler = async () => {
 	const submitHandler = async () => {
 		loading = true;
 		loading = true;
 
 
-		if (!ollama && (!url || !key)) {
+		if (!ollama && !url) {
 			loading = false;
 			loading = false;
-			toast.error('URL and Key are required');
+			toast.error('URL is required');
 			return;
 			return;
 		}
 		}
 
 
@@ -88,6 +90,7 @@
 			key,
 			key,
 			config: {
 			config: {
 				enable: enable,
 				enable: enable,
+				tags: tags,
 				prefix_id: prefixId,
 				prefix_id: prefixId,
 				model_ids: modelIds
 				model_ids: modelIds
 			}
 			}
@@ -101,6 +104,7 @@
 		url = '';
 		url = '';
 		key = '';
 		key = '';
 		prefixId = '';
 		prefixId = '';
+		tags = [];
 		modelIds = [];
 		modelIds = [];
 	};
 	};
 
 
@@ -110,6 +114,7 @@
 			key = connection.key;
 			key = connection.key;
 
 
 			enable = connection.config?.enable ?? true;
 			enable = connection.config?.enable ?? true;
+			tags = connection.config?.tags ?? [];
 			prefixId = connection.config?.prefix_id ?? '';
 			prefixId = connection.config?.prefix_id ?? '';
 			modelIds = connection.config?.model_ids ?? [];
 			modelIds = connection.config?.model_ids ?? [];
 		}
 		}
@@ -179,7 +184,7 @@
 								</div>
 								</div>
 							</div>
 							</div>
 
 
-							<Tooltip content="Verify Connection" className="self-end -mb-1">
+							<Tooltip content={$i18n.t('Verify Connection')} className="self-end -mb-1">
 								<button
 								<button
 									class="self-center p-1 bg-transparent hover:bg-gray-100 dark:bg-gray-900 dark:hover:bg-gray-850 rounded-lg transition"
 									class="self-center p-1 bg-transparent hover:bg-gray-100 dark:bg-gray-900 dark:hover:bg-gray-850 rounded-lg transition"
 									on:click={() => {
 									on:click={() => {
@@ -218,7 +223,7 @@
 										className="w-full text-sm bg-transparent placeholder:text-gray-300 dark:placeholder:text-gray-700 outline-hidden"
 										className="w-full text-sm bg-transparent placeholder:text-gray-300 dark:placeholder:text-gray-700 outline-hidden"
 										bind:value={key}
 										bind:value={key}
 										placeholder={$i18n.t('API Key')}
 										placeholder={$i18n.t('API Key')}
-										required={!ollama}
+										required={false}
 									/>
 									/>
 								</div>
 								</div>
 							</div>
 							</div>
@@ -244,6 +249,29 @@
 							</div>
 							</div>
 						</div>
 						</div>
 
 
+						<div class="flex gap-2 mt-2">
+							<div class="flex flex-col w-full">
+								<div class=" mb-1.5 text-xs text-gray-500">{$i18n.t('Tags')}</div>
+
+								<div class="flex-1">
+									<Tags
+										bind:tags
+										on:add={(e) => {
+											tags = [
+												...tags,
+												{
+													name: e.detail
+												}
+											];
+										}}
+										on:delete={(e) => {
+											tags = tags.filter((tag) => tag.name !== e.detail);
+										}}
+									/>
+								</div>
+							</div>
+						</div>
+
 						<hr class=" border-gray-100 dark:border-gray-700/10 my-2.5 w-full" />
 						<hr class=" border-gray-100 dark:border-gray-700/10 my-2.5 w-full" />
 
 
 						<div class="flex flex-col w-full">
 						<div class="flex flex-col w-full">

+ 13 - 1
src/lib/components/admin/Settings/Connections/OllamaConnection.svelte

@@ -5,6 +5,7 @@
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import AddConnectionModal from '$lib/components/AddConnectionModal.svelte';
 	import AddConnectionModal from '$lib/components/AddConnectionModal.svelte';
+	import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 
 	import Cog6 from '$lib/components/icons/Cog6.svelte';
 	import Cog6 from '$lib/components/icons/Cog6.svelte';
 	import Wrench from '$lib/components/icons/Wrench.svelte';
 	import Wrench from '$lib/components/icons/Wrench.svelte';
@@ -20,6 +21,7 @@
 
 
 	let showManageModal = false;
 	let showManageModal = false;
 	let showConfigModal = false;
 	let showConfigModal = false;
+	let showDeleteConfirmDialog = false;
 </script>
 </script>
 
 
 <AddConnectionModal
 <AddConnectionModal
@@ -31,7 +33,9 @@
 		key: config?.key ?? '',
 		key: config?.key ?? '',
 		config: config
 		config: config
 	}}
 	}}
-	{onDelete}
+	onDelete={() => {
+		showDeleteConfirmDialog = true;
+	}}
 	onSubmit={(connection) => {
 	onSubmit={(connection) => {
 		url = connection.url;
 		url = connection.url;
 		config = { ...connection.config, key: connection.key };
 		config = { ...connection.config, key: connection.key };
@@ -39,6 +43,14 @@
 	}}
 	}}
 />
 />
 
 
+<ConfirmDialog
+	bind:show={showDeleteConfirmDialog}
+	on:confirm={() => {
+		onDelete();
+		showConfigModal = false;
+	}}
+/>
+
 <ManageOllamaModal bind:show={showManageModal} urlIdx={idx} />
 <ManageOllamaModal bind:show={showManageModal} urlIdx={idx} />
 
 
 <div class="flex gap-1.5">
 <div class="flex gap-1.5">

+ 12 - 1
src/lib/components/admin/Settings/Connections/OpenAIConnection.svelte

@@ -6,6 +6,7 @@
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import Cog6 from '$lib/components/icons/Cog6.svelte';
 	import Cog6 from '$lib/components/icons/Cog6.svelte';
 	import AddConnectionModal from '$lib/components/AddConnectionModal.svelte';
 	import AddConnectionModal from '$lib/components/AddConnectionModal.svelte';
+	import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 
 	import { connect } from 'socket.io-client';
 	import { connect } from 'socket.io-client';
 
 
@@ -19,8 +20,16 @@
 	export let config = {};
 	export let config = {};
 
 
 	let showConfigModal = false;
 	let showConfigModal = false;
+	let showDeleteConfirmDialog = false;
 </script>
 </script>
 
 
+<ConfirmDialog
+	bind:show={showDeleteConfirmDialog}
+	on:confirm={() => {
+		onDelete();
+	}}
+/>
+
 <AddConnectionModal
 <AddConnectionModal
 	edit
 	edit
 	bind:show={showConfigModal}
 	bind:show={showConfigModal}
@@ -29,7 +38,9 @@
 		key,
 		key,
 		config
 		config
 	}}
 	}}
-	{onDelete}
+	onDelete={() => {
+		showDeleteConfirmDialog = true;
+	}}
 	onSubmit={(connection) => {
 	onSubmit={(connection) => {
 		url = connection.url;
 		url = connection.url;
 		key = connection.key;
 		key = connection.key;

+ 31 - 4
src/lib/components/admin/Settings/Documents.svelte

@@ -49,6 +49,8 @@
 	let contentExtractionEngine = 'default';
 	let contentExtractionEngine = 'default';
 	let tikaServerUrl = '';
 	let tikaServerUrl = '';
 	let showTikaServerUrl = false;
 	let showTikaServerUrl = false;
+	let doclingServerUrl = '';
+	let showDoclingServerUrl = false;
 	let documentIntelligenceEndpoint = '';
 	let documentIntelligenceEndpoint = '';
 	let documentIntelligenceKey = '';
 	let documentIntelligenceKey = '';
 	let showDocumentIntelligenceConfig = false;
 	let showDocumentIntelligenceConfig = false;
@@ -176,6 +178,10 @@
 			toast.error($i18n.t('Tika Server URL required.'));
 			toast.error($i18n.t('Tika Server URL required.'));
 			return;
 			return;
 		}
 		}
+		if (contentExtractionEngine === 'docling' && doclingServerUrl === '') {
+			toast.error($i18n.t('Docling Server URL required.'));
+			return;
+		}
 		if (
 		if (
 			contentExtractionEngine === 'document_intelligence' &&
 			contentExtractionEngine === 'document_intelligence' &&
 			(documentIntelligenceEndpoint === '' || documentIntelligenceKey === '')
 			(documentIntelligenceEndpoint === '' || documentIntelligenceKey === '')
@@ -210,6 +216,7 @@
 			content_extraction: {
 			content_extraction: {
 				engine: contentExtractionEngine,
 				engine: contentExtractionEngine,
 				tika_server_url: tikaServerUrl,
 				tika_server_url: tikaServerUrl,
+				docling_server_url: doclingServerUrl,
 				document_intelligence_config: {
 				document_intelligence_config: {
 					key: documentIntelligenceKey,
 					key: documentIntelligenceKey,
 					endpoint: documentIntelligenceEndpoint
 					endpoint: documentIntelligenceEndpoint
@@ -270,7 +277,10 @@
 
 
 			contentExtractionEngine = res.content_extraction.engine;
 			contentExtractionEngine = res.content_extraction.engine;
 			tikaServerUrl = res.content_extraction.tika_server_url;
 			tikaServerUrl = res.content_extraction.tika_server_url;
+			doclingServerUrl = res.content_extraction.docling_server_url;
+
 			showTikaServerUrl = contentExtractionEngine === 'tika';
 			showTikaServerUrl = contentExtractionEngine === 'tika';
+			showDoclingServerUrl = contentExtractionEngine === 'docling';
 			documentIntelligenceEndpoint = res.content_extraction.document_intelligence_config.endpoint;
 			documentIntelligenceEndpoint = res.content_extraction.document_intelligence_config.endpoint;
 			documentIntelligenceKey = res.content_extraction.document_intelligence_config.key;
 			documentIntelligenceKey = res.content_extraction.document_intelligence_config.key;
 			showDocumentIntelligenceConfig = contentExtractionEngine === 'document_intelligence';
 			showDocumentIntelligenceConfig = contentExtractionEngine === 'document_intelligence';
@@ -338,6 +348,7 @@
 							>
 							>
 								<option value="">{$i18n.t('Default')} </option>
 								<option value="">{$i18n.t('Default')} </option>
 								<option value="tika">{$i18n.t('Tika')}</option>
 								<option value="tika">{$i18n.t('Tika')}</option>
+								<option value="docling">{$i18n.t('Docling')}</option>
 								<option value="document_intelligence">{$i18n.t('Document Intelligence')}</option>
 								<option value="document_intelligence">{$i18n.t('Document Intelligence')}</option>
 							</select>
 							</select>
 						</div>
 						</div>
@@ -352,6 +363,14 @@
 								/>
 								/>
 							</div>
 							</div>
 						</div>
 						</div>
+					{:else if contentExtractionEngine === 'docling'}
+						<div class="flex w-full mt-1">
+							<input
+								class="flex-1 w-full rounded-lg text-sm bg-transparent outline-hidden"
+								placeholder={$i18n.t('Enter Docling Server URL')}
+								bind:value={doclingServerUrl}
+							/>
+						</div>
 					{:else if contentExtractionEngine === 'document_intelligence'}
 					{:else if contentExtractionEngine === 'document_intelligence'}
 						<div class="my-0.5 flex gap-2 pr-2">
 						<div class="my-0.5 flex gap-2 pr-2">
 							<input
 							<input
@@ -388,8 +407,12 @@
 					<div class="flex items-center relative">
 					<div class="flex items-center relative">
 						<Tooltip
 						<Tooltip
 							content={BYPASS_EMBEDDING_AND_RETRIEVAL
 							content={BYPASS_EMBEDDING_AND_RETRIEVAL
-								? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
-								: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+								? $i18n.t(
+										'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
+									)
+								: $i18n.t(
+										'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
+									)}
 						>
 						>
 							<Switch bind:state={BYPASS_EMBEDDING_AND_RETRIEVAL} />
 							<Switch bind:state={BYPASS_EMBEDDING_AND_RETRIEVAL} />
 						</Tooltip>
 						</Tooltip>
@@ -626,8 +649,12 @@
 						<div class="flex items-center relative">
 						<div class="flex items-center relative">
 							<Tooltip
 							<Tooltip
 								content={RAG_FULL_CONTEXT
 								content={RAG_FULL_CONTEXT
-									? 'Inject entire contents as context for comprehensive processing, this is recommended for complex queries.'
-									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+									? $i18n.t(
+											'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
+										)
+									: $i18n.t(
+											'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
+										)}
 							>
 							>
 								<Switch bind:state={RAG_FULL_CONTEXT} />
 								<Switch bind:state={RAG_FULL_CONTEXT} />
 							</Tooltip>
 							</Tooltip>

+ 11 - 2
src/lib/components/admin/Settings/Evaluations/ArenaModelModal.svelte

@@ -10,6 +10,7 @@
 	import PencilSolid from '$lib/components/icons/PencilSolid.svelte';
 	import PencilSolid from '$lib/components/icons/PencilSolid.svelte';
 	import { toast } from 'svelte-sonner';
 	import { toast } from 'svelte-sonner';
 	import AccessControl from '$lib/components/workspace/common/AccessControl.svelte';
 	import AccessControl from '$lib/components/workspace/common/AccessControl.svelte';
+	import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 
 	export let show = false;
 	export let show = false;
 	export let edit = false;
 	export let edit = false;
@@ -44,6 +45,7 @@
 
 
 	let imageInputElement;
 	let imageInputElement;
 	let loading = false;
 	let loading = false;
+	let showDeleteConfirmDialog = false;
 
 
 	const addModelHandler = () => {
 	const addModelHandler = () => {
 		if (selectedModelId) {
 		if (selectedModelId) {
@@ -115,6 +117,14 @@
 	});
 	});
 </script>
 </script>
 
 
+<ConfirmDialog
+	bind:show={showDeleteConfirmDialog}
+	on:confirm={() => {
+		dispatch('delete', model);
+		show = false;
+	}}
+/>
+
 <Modal size="sm" bind:show>
 <Modal size="sm" bind:show>
 	<div>
 	<div>
 		<div class=" flex justify-between dark:text-gray-100 px-5 pt-4 pb-2">
 		<div class=" flex justify-between dark:text-gray-100 px-5 pt-4 pb-2">
@@ -378,8 +388,7 @@
 								class="px-3.5 py-1.5 text-sm font-medium dark:bg-black dark:hover:bg-gray-950 dark:text-white bg-white text-black hover:bg-gray-100 transition rounded-full flex flex-row space-x-1 items-center"
 								class="px-3.5 py-1.5 text-sm font-medium dark:bg-black dark:hover:bg-gray-950 dark:text-white bg-white text-black hover:bg-gray-100 transition rounded-full flex flex-row space-x-1 items-center"
 								type="button"
 								type="button"
 								on:click={() => {
 								on:click={() => {
-									dispatch('delete', model);
-									show = false;
+									showDeleteConfirmDialog = true;
 								}}
 								}}
 							>
 							>
 								{$i18n.t('Delete')}
 								{$i18n.t('Delete')}

+ 9 - 5
src/lib/components/admin/Settings/Images.svelte

@@ -191,11 +191,15 @@
 			}
 			}
 
 
 			if (config.comfyui.COMFYUI_WORKFLOW) {
 			if (config.comfyui.COMFYUI_WORKFLOW) {
-				config.comfyui.COMFYUI_WORKFLOW = JSON.stringify(
-					JSON.parse(config.comfyui.COMFYUI_WORKFLOW),
-					null,
-					2
-				);
+				try {
+					config.comfyui.COMFYUI_WORKFLOW = JSON.stringify(
+						JSON.parse(config.comfyui.COMFYUI_WORKFLOW),
+						null,
+						2
+					);
+				} catch (e) {
+					console.log(e);
+				}
 			}
 			}
 
 
 			requiredWorkflowNodes = requiredWorkflowNodes.map((node) => {
 			requiredWorkflowNodes = requiredWorkflowNodes.map((node) => {

+ 2 - 1
src/lib/components/admin/Settings/Models/ModelList.svelte

@@ -33,6 +33,7 @@
 		if (modelListElement) {
 		if (modelListElement) {
 			sortable = Sortable.create(modelListElement, {
 			sortable = Sortable.create(modelListElement, {
 				animation: 150,
 				animation: 150,
+				handle: '.item-handle',
 				onUpdate: async (event) => {
 				onUpdate: async (event) => {
 					positionChangeHandler();
 					positionChangeHandler();
 				}
 				}
@@ -47,7 +48,7 @@
 			<div class=" flex gap-2 w-full justify-between items-center" id="model-item-{modelId}">
 			<div class=" flex gap-2 w-full justify-between items-center" id="model-item-{modelId}">
 				<Tooltip content={modelId} placement="top-start">
 				<Tooltip content={modelId} placement="top-start">
 					<div class="flex items-center gap-1">
 					<div class="flex items-center gap-1">
-						<EllipsisVertical className="size-4 cursor-move" />
+						<EllipsisVertical className="size-4 cursor-move item-handle" />
 
 
 						<div class=" text-sm flex-1 py-1 rounded-lg">
 						<div class=" text-sm flex-1 py-1 rounded-lg">
 							{#if $models.find((model) => model.id === modelId)}
 							{#if $models.find((model) => model.id === modelId)}

+ 6 - 2
src/lib/components/admin/Settings/WebSearch.svelte

@@ -462,8 +462,12 @@
 						<div class="flex items-center relative">
 						<div class="flex items-center relative">
 							<Tooltip
 							<Tooltip
 								content={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
 								content={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
-									? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
-									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+									? $i18n.t(
+											'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
+										)
+									: $i18n.t(
+											'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
+										)}
 							>
 							>
 								<Switch bind:state={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL} />
 								<Switch bind:state={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL} />
 							</Tooltip>
 							</Tooltip>

+ 14 - 3
src/lib/components/admin/Users/Groups/EditGroupModal.svelte

@@ -9,6 +9,7 @@
 	import Users from './Users.svelte';
 	import Users from './Users.svelte';
 	import UserPlusSolid from '$lib/components/icons/UserPlusSolid.svelte';
 	import UserPlusSolid from '$lib/components/icons/UserPlusSolid.svelte';
 	import WrenchSolid from '$lib/components/icons/WrenchSolid.svelte';
 	import WrenchSolid from '$lib/components/icons/WrenchSolid.svelte';
+	import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 
 	export let onSubmit: Function = () => {};
 	export let onSubmit: Function = () => {};
 	export let onDelete: Function = () => {};
 	export let onDelete: Function = () => {};
@@ -25,6 +26,7 @@
 
 
 	let selectedTab = 'general';
 	let selectedTab = 'general';
 	let loading = false;
 	let loading = false;
+	let showDeleteConfirmDialog = false;
 
 
 	export let name = '';
 	export let name = '';
 	export let description = '';
 	export let description = '';
@@ -88,6 +90,14 @@
 	});
 	});
 </script>
 </script>
 
 
+<ConfirmDialog
+	bind:show={showDeleteConfirmDialog}
+	on:confirm={() => {
+		onDelete();
+		show = false;
+	}}
+/>
+
 <Modal size="md" bind:show>
 <Modal size="md" bind:show>
 	<div>
 	<div>
 		<div class=" flex justify-between dark:text-gray-100 px-5 pt-4 mb-1.5">
 		<div class=" flex justify-between dark:text-gray-100 px-5 pt-4 mb-1.5">
@@ -263,18 +273,19 @@
 						{/if}
 						{/if}
 					</div> -->
 					</div> -->
 
 
-					<div class="flex justify-end pt-3 text-sm font-medium gap-1.5">
+					<div class="flex justify-between pt-3 text-sm font-medium gap-1.5">
 						{#if edit}
 						{#if edit}
 							<button
 							<button
 								class="px-3.5 py-1.5 text-sm font-medium dark:bg-black dark:hover:bg-gray-900 dark:text-white bg-white text-black hover:bg-gray-100 transition rounded-full flex flex-row space-x-1 items-center"
 								class="px-3.5 py-1.5 text-sm font-medium dark:bg-black dark:hover:bg-gray-900 dark:text-white bg-white text-black hover:bg-gray-100 transition rounded-full flex flex-row space-x-1 items-center"
 								type="button"
 								type="button"
 								on:click={() => {
 								on:click={() => {
-									onDelete();
-									show = false;
+									showDeleteConfirmDialog = true;
 								}}
 								}}
 							>
 							>
 								{$i18n.t('Delete')}
 								{$i18n.t('Delete')}
 							</button>
 							</button>
+						{:else}
+							<div></div>
 						{/if}
 						{/if}
 
 
 						<button
 						<button

+ 15 - 1
src/lib/components/admin/Users/UserList/UserChatsModal.svelte

@@ -12,6 +12,7 @@
 	import Modal from '$lib/components/common/Modal.svelte';
 	import Modal from '$lib/components/common/Modal.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import Spinner from '$lib/components/common/Spinner.svelte';
 	import Spinner from '$lib/components/common/Spinner.svelte';
+	import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 
 	const i18n = getContext('i18n');
 	const i18n = getContext('i18n');
 
 
@@ -19,6 +20,8 @@
 	export let user;
 	export let user;
 
 
 	let chats = null;
 	let chats = null;
+	let showDeleteConfirmDialog = false;
+	let chatToDelete = null;
 
 
 	const deleteChatHandler = async (chatId) => {
 	const deleteChatHandler = async (chatId) => {
 		const res = await deleteChatById(localStorage.token, chatId).catch((error) => {
 		const res = await deleteChatById(localStorage.token, chatId).catch((error) => {
@@ -50,6 +53,16 @@
 	}
 	}
 </script>
 </script>
 
 
+<ConfirmDialog
+	bind:show={showDeleteConfirmDialog}
+	on:confirm={() => {
+		if (chatToDelete) {
+			deleteChatHandler(chatToDelete);
+			chatToDelete = null;
+		}
+	}}
+/>
+
 <Modal size="lg" bind:show>
 <Modal size="lg" bind:show>
 	<div class=" flex justify-between dark:text-gray-300 px-5 pt-4">
 	<div class=" flex justify-between dark:text-gray-300 px-5 pt-4">
 		<div class=" text-lg font-medium self-center capitalize">
 		<div class=" text-lg font-medium self-center capitalize">
@@ -142,7 +155,8 @@
 														<button
 														<button
 															class="self-center w-fit text-sm px-2 py-2 hover:bg-black/5 dark:hover:bg-white/5 rounded-xl"
 															class="self-center w-fit text-sm px-2 py-2 hover:bg-black/5 dark:hover:bg-white/5 rounded-xl"
 															on:click={async () => {
 															on:click={async () => {
-																deleteChatHandler(chat.id);
+																chatToDelete = chat.id;
+																showDeleteConfirmDialog = true;
 															}}
 															}}
 														>
 														>
 															<svg
 															<svg

+ 7 - 4
src/lib/components/channel/Messages.svelte

@@ -73,10 +73,13 @@
 						<div class="text-2xl font-medium capitalize">{channel.name}</div>
 						<div class="text-2xl font-medium capitalize">{channel.name}</div>
 
 
 						<div class=" text-gray-500">
 						<div class=" text-gray-500">
-							This channel was created on {dayjs(channel.created_at / 1000000).format(
-								'MMMM D, YYYY'
-							)}. This is the very beginning of the {channel.name}
-							channel.
+							{$i18n.t(
+								'This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.',
+								{
+									createdAt: dayjs(channel.created_at / 1000000).format('MMMM D, YYYY'),
+									channelName: channel.name
+								}
+							)}
 						</div>
 						</div>
 					</div>
 					</div>
 				{:else}
 				{:else}

+ 18 - 16
src/lib/components/chat/Chat.svelte

@@ -212,7 +212,14 @@
 		const _chatId = JSON.parse(JSON.stringify($chatId));
 		const _chatId = JSON.parse(JSON.stringify($chatId));
 		let _messageId = JSON.parse(JSON.stringify(message.id));
 		let _messageId = JSON.parse(JSON.stringify(message.id));
 
 
-		let messageChildrenIds = history.messages[_messageId].childrenIds;
+		let messageChildrenIds = [];
+		if (_messageId === null) {
+			messageChildrenIds = Object.keys(history.messages).filter(
+				(id) => history.messages[id].parentId === null
+			);
+		} else {
+			messageChildrenIds = history.messages[_messageId].childrenIds;
+		}
 
 
 		while (messageChildrenIds.length !== 0) {
 		while (messageChildrenIds.length !== 0) {
 			_messageId = messageChildrenIds.at(-1);
 			_messageId = messageChildrenIds.at(-1);
@@ -286,18 +293,10 @@
 				} else if (type === 'chat:tags') {
 				} else if (type === 'chat:tags') {
 					chat = await getChatById(localStorage.token, $chatId);
 					chat = await getChatById(localStorage.token, $chatId);
 					allTags.set(await getAllTags(localStorage.token));
 					allTags.set(await getAllTags(localStorage.token));
-				} else if (type === 'message') {
+				} else if (type === 'chat:message:delta' || type === 'message') {
 					message.content += data.content;
-				} else if (type === 'replace') {
+				} else if (type === 'chat:message' || type === 'replace') {
 					message.content = data.content;
-				} else if (type === 'action') {
-					if (data.action === 'continue') {
-						const continueButton = document.getElementById('continue-response-button');
-
-						if (continueButton) {
-							continueButton.click();
-						}
-					}
 				} else if (type === 'confirmation') {
 					eventCallback = cb;
 
 
@@ -887,6 +886,8 @@
 				await chats.set(await getChatList(localStorage.token, $currentChatPage));
 			}
 		}
+
+		taskId = null;
 	};
 
 
 	const chatActionHandler = async (chatId, actionId, modelId, responseMessageId, event = null) => {
@@ -1276,12 +1277,13 @@
 		prompt = '';
 
 
 		// Reset chat input textarea
-		const chatInputElement = document.getElementById('chat-input');
+		if (!($settings?.richTextInput ?? true)) {
+			const chatInputElement = document.getElementById('chat-input');
 
 
-		if (chatInputElement) {
-			await tick();
-			chatInputElement.style.height = '';
-			chatInputElement.style.height = Math.min(chatInputElement.scrollHeight, 320) + 'px';
+			if (chatInputElement) {
+				await tick();
+				chatInputElement.style.height = '';
+			}
 		}
 
 
 		const _files = JSON.parse(JSON.stringify(files));
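For the event-handling change in this file: `chat:message:delta` (like the legacy `message` type) appends streamed content, while `chat:message` (like `replace`) overwrites it. A hedged sketch of that branch in isolation; the event type names come from the hunk above, the surrounding shapes are illustrative:

```ts
interface ChatEventData {
	content: string;
}

// Append streamed deltas, overwrite on full-message events.
function applyChatEvent(message: { content: string }, type: string, data: ChatEventData): void {
	if (type === 'chat:message:delta' || type === 'message') {
		message.content += data.content;
	} else if (type === 'chat:message' || type === 'replace') {
		message.content = data.content;
	}
}
```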

+ 36 - 40
src/lib/components/chat/Controls/Controls.svelte

@@ -30,45 +30,45 @@
 		</button>
 	</div>
 
 
-	{#if $user.role === 'admin' || $user?.permissions.chat?.controls}
-		<div class=" dark:text-gray-200 text-sm font-primary py-0.5 px-0.5">
-			{#if chatFiles.length > 0}
-				<Collapsible title={$i18n.t('Files')} open={true} buttonClassName="w-full">
-					<div class="flex flex-col gap-1 mt-1.5" slot="content">
-						{#each chatFiles as file, fileIdx}
-							<FileItem
-								className="w-full"
-								item={file}
-								edit={true}
-								url={file?.url ? file.url : null}
-								name={file.name}
-								type={file.type}
-								size={file?.size}
-								dismissible={true}
-								on:dismiss={() => {
-									// Remove the file from the chatFiles array
+	<div class=" dark:text-gray-200 text-sm font-primary py-0.5 px-0.5">
+		{#if chatFiles.length > 0}
+			<Collapsible title={$i18n.t('Files')} open={true} buttonClassName="w-full">
+				<div class="flex flex-col gap-1 mt-1.5" slot="content">
+					{#each chatFiles as file, fileIdx}
+						<FileItem
+							className="w-full"
+							item={file}
+							edit={true}
+							url={file?.url ? file.url : null}
+							name={file.name}
+							type={file.type}
+							size={file?.size}
+							dismissible={true}
+							on:dismiss={() => {
+								// Remove the file from the chatFiles array
 
 
-									chatFiles.splice(fileIdx, 1);
-									chatFiles = chatFiles;
-								}}
-								on:click={() => {
-									console.log(file);
-								}}
-							/>
-						{/each}
-					</div>
-				</Collapsible>
-
-				<hr class="my-2 border-gray-50 dark:border-gray-700/10" />
-			{/if}
-
-			<Collapsible bind:open={showValves} title={$i18n.t('Valves')} buttonClassName="w-full">
-				<div class="text-sm" slot="content">
-					<Valves show={showValves} />
+								chatFiles.splice(fileIdx, 1);
+								chatFiles = chatFiles;
+							}}
+							on:click={() => {
+								console.log(file);
+							}}
+						/>
+					{/each}
 				</div>
 				</div>
 			</Collapsible>
 			</Collapsible>
 
 
 			<hr class="my-2 border-gray-50 dark:border-gray-700/10" />
 			<hr class="my-2 border-gray-50 dark:border-gray-700/10" />
+		{/if}
+
+		<Collapsible bind:open={showValves} title={$i18n.t('Valves')} buttonClassName="w-full">
+			<div class="text-sm" slot="content">
+				<Valves show={showValves} />
+			</div>
+		</Collapsible>
+
+		{#if $user.role === 'admin' || $user?.permissions.chat?.controls}
+			<hr class="my-2 border-gray-50 dark:border-gray-700/10" />
 
 
 			<Collapsible title={$i18n.t('System Prompt')} open={true} buttonClassName="w-full">
 			<Collapsible title={$i18n.t('System Prompt')} open={true} buttonClassName="w-full">
 				<div class="" slot="content">
 				<div class="" slot="content">
@@ -90,10 +90,6 @@
 					</div>
 					</div>
 				</div>
 			</Collapsible>
 			</Collapsible>
-		</div>
-	{:else}
-		<div class="text-sm dark:text-gray-300 text-center py-2 px-10">
-			{$i18n.t('You do not have permission to access this feature.')}
-		</div>
-	{/if}
+		{/if}
+	</div>
 </div>
 </div>

+ 1 - 1
src/lib/components/chat/MessageInput/Commands/Knowledge.svelte

@@ -210,7 +210,7 @@
 									{/if}
 									{/if}
 
 
 									<div class="line-clamp-1">
 									<div class="line-clamp-1">
-										{item?.name}
+										{decodeURIComponent(item?.name)}
 									</div>
 									</div>
 								</div>
 
 

+ 6 - 1
src/lib/components/chat/MessageInput/Commands/Prompts.svelte

@@ -120,7 +120,12 @@
 			text = text.replaceAll('{{CURRENT_WEEKDAY}}', weekday);
 			text = text.replaceAll('{{CURRENT_WEEKDAY}}', weekday);
 		}
 		}
 
 
-		prompt = text;
+		const promptWords = prompt.split(' ');
+
+		promptWords.pop();
+		promptWords.push(`${text}`);
+
+		prompt = promptWords.join(' ');
 
 
 		const chatInputContainerElement = document.getElementById('chat-input-container');
 		const chatInputContainerElement = document.getElementById('chat-input-container');
 		const chatInputElement = document.getElementById('chat-input');

+ 6 - 6
src/lib/components/chat/Messages/Citations.svelte

@@ -102,7 +102,7 @@
 			<div class="flex text-xs font-medium flex-wrap">
 			<div class="flex text-xs font-medium flex-wrap">
 				{#each citations as citation, idx}
 				{#each citations as citation, idx}
 					<button
 					<button
-						id={`source-${id}-${idx}`}
+						id={`source-${id}-${idx + 1}`}
 						class="no-toggle outline-hidden flex dark:text-gray-300 p-1 bg-white dark:bg-gray-900 rounded-xl max-w-96"
 						class="no-toggle outline-hidden flex dark:text-gray-300 p-1 bg-white dark:bg-gray-900 rounded-xl max-w-96"
 						on:click={() => {
 						on:click={() => {
 							showCitationModal = true;
 							showCitationModal = true;
@@ -117,14 +117,14 @@
 						<div
 						<div
 							class="flex-1 mx-1 truncate text-black/60 hover:text-black dark:text-white/60 dark:hover:text-white transition"
 							class="flex-1 mx-1 truncate text-black/60 hover:text-black dark:text-white/60 dark:hover:text-white transition"
 						>
 						>
-							{citation.source.name}
+							{decodeURIComponent(citation.source.name)}
 						</div>
 						</div>
 					</button>
 					</button>
 				{/each}
 				{/each}
 			</div>
 			</div>
 		{:else}
 		{:else}
 			<Collapsible
 			<Collapsible
-				id="collapsible-sources"
+				id={`collapsible-${id}`}
 				bind:open={isCollapsibleOpen}
 				bind:open={isCollapsibleOpen}
 				className="w-full max-w-full "
 				className="w-full max-w-full "
 				buttonClassName="w-fit max-w-full"
 				buttonClassName="w-fit max-w-full"
@@ -157,7 +157,7 @@
 											</div>
 											</div>
 										{/if}
 										{/if}
 										<div class="flex-1 mx-1 truncate">
 										<div class="flex-1 mx-1 truncate">
-											{citation.source.name}
+											{decodeURIComponent(citation.source.name)}
 										</div>
 										</div>
 									</button>
 									</button>
 								{/each}
 								{/each}
@@ -181,7 +181,7 @@
 					<div class="flex text-xs font-medium flex-wrap">
 					<div class="flex text-xs font-medium flex-wrap">
 						{#each citations as citation, idx}
 						{#each citations as citation, idx}
 							<button
 							<button
-								id={`source-${id}-${idx}`}
+								id={`source-${id}-${idx + 1}`}
 								class="no-toggle outline-hidden flex dark:text-gray-300 p-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-900 dark:hover:bg-gray-850 transition rounded-xl max-w-96"
 								class="no-toggle outline-hidden flex dark:text-gray-300 p-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-900 dark:hover:bg-gray-850 transition rounded-xl max-w-96"
 								on:click={() => {
 								on:click={() => {
 									showCitationModal = true;
 									showCitationModal = true;
@@ -194,7 +194,7 @@
 									</div>
 									</div>
 								{/if}
 								{/if}
 								<div class="flex-1 mx-1 truncate">
 								<div class="flex-1 mx-1 truncate">
-									{citation.source.name}
+									{decodeURIComponent(citation.source.name)}
 								</div>
 								</div>
 							</button>
 							</button>
 						{/each}

+ 3 - 3
src/lib/components/chat/Messages/CitationsModal.svelte

@@ -98,7 +98,7 @@
 												: `#`}
 												: `#`}
 										target="_blank"
 										target="_blank"
 									>
 									>
-										{document?.metadata?.name ?? document.source.name}
+										{decodeURIComponent(document?.metadata?.name ?? document.source.name)}
 									</a>
 									</a>
 									{#if document?.metadata?.page}
 									{#if document?.metadata?.page}
 										<span class="text-xs text-gray-500 dark:text-gray-400">
 										<span class="text-xs text-gray-500 dark:text-gray-400">
@@ -128,11 +128,11 @@
 													{percentage.toFixed(2)}%
 													{percentage.toFixed(2)}%
 												</span>
 												</span>
 												<span class="text-gray-500 dark:text-gray-500">
 												<span class="text-gray-500 dark:text-gray-500">
-													({document.distance.toFixed(4)})
+													({(document?.distance ?? 0).toFixed(4)})
 												</span>
 												</span>
 											{:else}
 											{:else}
 												<span class="text-gray-500 dark:text-gray-500">
 												<span class="text-gray-500 dark:text-gray-500">
-													{document.distance.toFixed(4)}
+													{(document?.distance ?? 0).toFixed(4)}
 												</span>
 												</span>
 											{/if}
 											{/if}
 										</div>
 										</div>

+ 4 - 2
src/lib/components/chat/Messages/CodeBlock.svelte

@@ -27,6 +27,7 @@
 
 
 	export let save = false;
 	export let save = false;
 	export let run = true;
 	export let run = true;
+	export let collapsed = false;
 
 
 	export let token;
 	export let token;
 	export let lang = '';
 	export let lang = '';
@@ -60,7 +61,6 @@
 	let result = null;
 	let result = null;
 	let files = null;
 	let files = null;
 
 
-	let collapsed = false;
 	let copied = false;
 	let copied = false;
 	let saved = false;
 	let saved = false;
 
 
@@ -441,7 +441,9 @@
 
 
 					{#if ($config?.features?.enable_code_execution ?? true) && (lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code)))}
 					{#if ($config?.features?.enable_code_execution ?? true) && (lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code)))}
 						{#if executing}
 						{#if executing}
-							<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
+							<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">
+								{$i18n.t('Running')}
+							</div>
 						{:else if run}
 						{:else if run}
 							<button
 							<button
 								class="flex gap-1 items-center run-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"
 								class="flex gap-1 items-center run-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"

+ 108 - 0
src/lib/components/chat/Messages/Markdown/AlertRenderer.svelte

@@ -0,0 +1,108 @@
+<script lang="ts" context="module">
+	import { marked, type Token } from 'marked';
+
+	type AlertType = 'NOTE' | 'TIP' | 'IMPORTANT' | 'WARNING' | 'CAUTION';
+
+	interface AlertTheme {
+		border: string;
+		text: string;
+		icon: ComponentType;
+	}
+
+	export interface AlertData {
+		type: AlertType;
+		text: string;
+		tokens: Token[];
+	}
+
+	const alertStyles: Record<AlertType, AlertTheme> = {
+		NOTE: {
+			border: 'border-sky-500',
+			text: 'text-sky-500',
+			icon: Info
+		},
+		TIP: {
+			border: 'border-emerald-500',
+			text: 'text-emerald-500',
+			icon: LightBlub
+		},
+		IMPORTANT: {
+			border: 'border-purple-500',
+			text: 'text-purple-500',
+			icon: Star
+		},
+		WARNING: {
+			border: 'border-yellow-500',
+			text: 'text-yellow-500',
+			icon: ArrowRightCircle
+		},
+		CAUTION: {
+			border: 'border-rose-500',
+			text: 'text-rose-500',
+			icon: Bolt
+		}
+	};
+
+	export function alertComponent(token: Token): AlertData | false {
+		const regExpStr = `^(?:\\[!(NOTE|TIP|IMPORTANT|WARNING|CAUTION)\\])\\s*?\n*`;
+		const regExp = new RegExp(regExpStr);
+		const matches = token.text?.match(regExp);
+
+		if (matches && matches.length) {
+			const alertType = matches[1] as AlertType;
+			const newText = token.text.replace(regExp, '');
+			const newTokens = marked.lexer(newText);
+			return {
+				type: alertType,
+				text: newText,
+				tokens: newTokens
+			};
+		}
+		return false;
+	}
+</script>
+
+<script lang="ts">
+	import Info from '$lib/components/icons/Info.svelte';
+	import Star from '$lib/components/icons/Star.svelte';
+	import LightBlub from '$lib/components/icons/LightBlub.svelte';
+	import Bolt from '$lib/components/icons/Bolt.svelte';
+	import ArrowRightCircle from '$lib/components/icons/ArrowRightCircle.svelte';
+	import MarkdownTokens from './MarkdownTokens.svelte';
+	import type { ComponentType } from 'svelte';
+
+	export let token: Token;
+	export let alert: AlertData;
+	export let id = '';
+	export let tokenIdx = 0;
+	export let onTaskClick: ((event: MouseEvent) => void) | undefined = undefined;
+	export let onSourceClick: ((event: MouseEvent) => void) | undefined = undefined;
+</script>
+
+<!--
+
+Renders the following Markdown as alerts:
+
+> [!NOTE]
+> Example note
+
+> [!TIP]
+> Example tip
+
+> [!IMPORTANT]
+> Example important
+
+> [!CAUTION]
+> Example caution
+
+> [!WARNING]
+> Example warning
+
+-->
+<div class={`border-l-2 pl-2 ${alertStyles[alert.type].border}`}>
+	<p class={alertStyles[alert.type].text}>
+		<svelte:component this={alertStyles[alert.type].icon} className="inline-block size-4" />
+		<b>{alert.type}</b>
+	</p>
+	<MarkdownTokens id={`${id}-${tokenIdx}`} tokens={alert.tokens} {onTaskClick} {onSourceClick} />
+</div>

+ 15 - 5
src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte

@@ -14,10 +14,13 @@
 	import CodeBlock from '$lib/components/chat/Messages/CodeBlock.svelte';
 	import CodeBlock from '$lib/components/chat/Messages/CodeBlock.svelte';
 	import MarkdownInlineTokens from '$lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte';
 	import MarkdownInlineTokens from '$lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte';
 	import KatexRenderer from './KatexRenderer.svelte';
 	import KatexRenderer from './KatexRenderer.svelte';
+	import AlertRenderer, { alertComponent } from './AlertRenderer.svelte';
 	import Collapsible from '$lib/components/common/Collapsible.svelte';
 	import Collapsible from '$lib/components/common/Collapsible.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import ArrowDownTray from '$lib/components/icons/ArrowDownTray.svelte';
 	import ArrowDownTray from '$lib/components/icons/ArrowDownTray.svelte';
+
 	import Source from './Source.svelte';
 	import Source from './Source.svelte';
+	import { settings } from '$lib/stores';
 
 
 	const dispatch = createEventDispatcher();
 	const dispatch = createEventDispatcher();
 
 
@@ -84,6 +87,7 @@
 		{#if token.raw.includes('```')}
 		{#if token.raw.includes('```')}
 			<CodeBlock
 			<CodeBlock
 				id={`${id}-${tokenIdx}`}
 				id={`${id}-${tokenIdx}`}
+				collapsed={$settings?.collapseCodeBlocks ?? false}
 				{token}
 				{token}
 				lang={token?.lang ?? ''}
 				lang={token?.lang ?? ''}
 				code={token?.text ?? ''}
 				code={token?.text ?? ''}
@@ -119,7 +123,7 @@
 									class="px-3! py-1.5! cursor-pointer border border-gray-100 dark:border-gray-850"
 									class="px-3! py-1.5! cursor-pointer border border-gray-100 dark:border-gray-850"
 									style={token.align[headerIdx] ? '' : `text-align: ${token.align[headerIdx]}`}
 									style={token.align[headerIdx] ? '' : `text-align: ${token.align[headerIdx]}`}
 								>
 								>
-									<div class="flex flex-col gap-1.5 text-left">
+									<div class="gap-1.5 text-left">
 										<div class="shrink-0 break-normal">
 										<div class="shrink-0 break-normal">
 											<MarkdownInlineTokens
 											<MarkdownInlineTokens
 												id={`${id}-${tokenIdx}-header-${headerIdx}`}
 												id={`${id}-${tokenIdx}-header-${headerIdx}`}
@@ -140,7 +144,7 @@
 										class="px-3! py-1.5! text-gray-900 dark:text-white w-max border border-gray-100 dark:border-gray-850"
 										class="px-3! py-1.5! text-gray-900 dark:text-white w-max border border-gray-100 dark:border-gray-850"
 										style={token.align[cellIdx] ? '' : `text-align: ${token.align[cellIdx]}`}
 										style={token.align[cellIdx] ? '' : `text-align: ${token.align[cellIdx]}`}
 									>
 									>
-										<div class="flex flex-col break-normal">
+										<div class="break-normal">
 											<MarkdownInlineTokens
 											<MarkdownInlineTokens
 												id={`${id}-${tokenIdx}-row-${rowIdx}-${cellIdx}`}
 												id={`${id}-${tokenIdx}-row-${rowIdx}-${cellIdx}`}
 												tokens={cell.tokens}
 												tokens={cell.tokens}
@@ -170,9 +174,14 @@
 			</div>
 			</div>
 		</div>
 	{:else if token.type === 'blockquote'}
 	{:else if token.type === 'blockquote'}
-		<blockquote dir="auto">
-			<svelte:self id={`${id}-${tokenIdx}`} tokens={token.tokens} {onTaskClick} {onSourceClick} />
-		</blockquote>
+		{@const alert = alertComponent(token)}
+		{#if alert}
+			<AlertRenderer {token} {alert} />
+		{:else}
+			<blockquote dir="auto">
+				<svelte:self id={`${id}-${tokenIdx}`} tokens={token.tokens} {onTaskClick} {onSourceClick} />
+			</blockquote>
+		{/if}
 	{:else if token.type === 'list'}
 	{:else if token.type === 'list'}
 		{#if token.ordered}
 		{#if token.ordered}
 			<ol start={token.start || 1}>
 			<ol start={token.start || 1}>
@@ -242,6 +251,7 @@
 	{:else if token.type === 'details'}
 	{:else if token.type === 'details'}
 		<Collapsible
 		<Collapsible
 			title={token.summary}
 			title={token.summary}
+			open={$settings?.expandDetails ?? false}
 			attributes={token?.attributes}
 			attributes={token?.attributes}
 			className="w-full space-y-1"
 			className="w-full space-y-1"
 			dir="auto"
 			dir="auto"

+ 5 - 3
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -559,7 +559,7 @@
 		<div class="flex-auto w-0 pl-1">
 		<div class="flex-auto w-0 pl-1">
 			<Name>
 			<Name>
 				<Tooltip content={model?.name ?? message.model} placement="top-start">
 				<Tooltip content={model?.name ?? message.model} placement="top-start">
-					<span class="line-clamp-1">
+					<span class="line-clamp-1 text-black dark:text-white">
 						{model?.name ?? message.model}
 						{model?.name ?? message.model}
 					</span>
 					</span>
 				</Tooltip>
 				</Tooltip>
@@ -748,7 +748,9 @@
 										onSourceClick={async (id, idx) => {
 										onSourceClick={async (id, idx) => {
 											console.log(id, idx);
 											console.log(id, idx);
 											let sourceButton = document.getElementById(`source-${message.id}-${idx}`);
 											let sourceButton = document.getElementById(`source-${message.id}-${idx}`);
-											const sourcesCollapsible = document.getElementById(`collapsible-sources`);
+											const sourcesCollapsible = document.getElementById(
+												`collapsible-${message.id}`
+											);
 
 
 											if (sourceButton) {
 											if (sourceButton) {
 												sourceButton.click();
 												sourceButton.click();
@@ -1269,7 +1271,7 @@
 										<Tooltip content={$i18n.t('Delete')} placement="bottom">
 										<Tooltip content={$i18n.t('Delete')} placement="bottom">
 											<button
 											<button
 												type="button"
 												type="button"
-												id="continue-response-button"
+												id="delete-response-button"
 												class="{isLastMessage
 												class="{isLastMessage
 													? 'visible'
 													? 'visible'
 													: 'invisible group-hover:visible'} p-1.5 hover:bg-black/5 dark:hover:bg-white/5 rounded-lg dark:hover:text-white hover:text-black transition regenerate-response-button"
 													: 'invisible group-hover:visible'} p-1.5 hover:bg-black/5 dark:hover:bg-white/5 rounded-lg dark:hover:text-white hover:text-black transition regenerate-response-button"

+ 1 - 1
src/lib/components/chat/Messages/UserMessage.svelte

@@ -347,7 +347,7 @@
 								</button>
 								</button>
 							</Tooltip>
 							</Tooltip>
 
 
-							{#if !isFirstMessage && !readOnly}
+							{#if !readOnly && (!isFirstMessage || siblings.length > 1)}
 								<Tooltip content={$i18n.t('Delete')} placement="bottom">
 								<Tooltip content={$i18n.t('Delete')} placement="bottom">
 									<button
 									<button
 										class="invisible group-hover:visible p-1 rounded-sm dark:hover:text-white hover:text-black transition"
 										class="invisible group-hover:visible p-1 rounded-sm dark:hover:text-white hover:text-black transition"

+ 146 - 40
src/lib/components/chat/ModelSelector/Selector.svelte

@@ -61,10 +61,11 @@
 	$: selectedModel = items.find((item) => item.value === value) ?? '';
 	$: selectedModel = items.find((item) => item.value === value) ?? '';
 
 
 	let searchValue = '';
 	let searchValue = '';
+
 	let selectedTag = '';
 	let selectedTag = '';
+	let selectedConnectionType = '';
 
 
 	let ollamaVersion = null;
-
 	let selectedModelIdx = 0;
 
 
 	const fuse = new Fuse(
@@ -72,7 +73,7 @@
 			const _item = {
 				...item,
 				modelName: item.model?.name,
-				tags: item.model?.info?.meta?.tags?.map((tag) => tag.name).join(' '),
+				tags: (item.model?.tags ?? []).map((tag) => tag.name).join(' '),
 				desc: item.model?.info?.meta?.description
 			};
 			return _item;
@@ -93,14 +94,61 @@
 					if (selectedTag === '') {
 						return true;
 					}
-					return item.model?.info?.meta?.tags?.map((tag) => tag.name).includes(selectedTag);
+					return (item.model?.tags ?? []).map((tag) => tag.name).includes(selectedTag);
 				})
-		: items.filter((item) => {
-				if (selectedTag === '') {
-					return true;
-				}
-				return item.model?.info?.meta?.tags?.map((tag) => tag.name).includes(selectedTag);
-			});
+				.filter((item) => {
+					if (selectedConnectionType === '') {
+						return true;
+					} else if (selectedConnectionType === 'ollama') {
+						return item.model?.owned_by === 'ollama';
+					} else if (selectedConnectionType === 'openai') {
+						return item.model?.owned_by === 'openai';
+					} else if (selectedConnectionType === 'direct') {
+						return item.model?.direct;
+					}
+				})
+		: items
+				.filter((item) => {
+					if (selectedTag === '') {
+						return true;
+					}
+					return (item.model?.tags ?? []).map((tag) => tag.name).includes(selectedTag);
+				})
+				.filter((item) => {
+					if (selectedConnectionType === '') {
+						return true;
+					} else if (selectedConnectionType === 'ollama') {
+						return item.model?.owned_by === 'ollama';
+					} else if (selectedConnectionType === 'openai') {
+						return item.model?.owned_by === 'openai';
+					} else if (selectedConnectionType === 'direct') {
+						return item.model?.direct;
+					}
+				});
+
+	$: if (selectedTag || selectedConnectionType) {
+		resetView();
+	} else {
+		resetView();
+	}
+
+	const resetView = async () => {
+		await tick();
+
+		const selectedInFiltered = filteredItems.findIndex((item) => item.value === value);
+
+		if (selectedInFiltered >= 0) {
+			// The selected model is visible in the current filter
+			selectedModelIdx = selectedInFiltered;
+		} else {
+			// The selected model is not visible, default to first item in filtered list
+			selectedModelIdx = 0;
+		}
+
+		await tick();
+		const item = document.querySelector(`[data-arrow-selected="true"]`);
+		item?.scrollIntoView({ block: 'center', inline: 'nearest', behavior: 'instant' });
+	};
 
 
 	const pullModelHandler = async () => {
 		const sanitizedModelTag = searchValue.trim().replace(/^ollama\s+(run|pull)\s+/, '');
@@ -234,7 +282,7 @@
 		ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
 
 
 		if (items) {
-			tags = items.flatMap((item) => item.model?.info?.meta?.tags ?? []).map((tag) => tag.name);
+			tags = items.flatMap((item) => item.model?.tags ?? []).map((tag) => tag.name);
 
 
 			// Remove duplicates and sort
 			tags = Array.from(new Set(tags)).sort((a, b) => a.localeCompare(b));
@@ -262,8 +310,9 @@
 	bind:open={show}
 	onOpenChange={async () => {
 		searchValue = '';
-		selectedModelIdx = 0;
 		window.setTimeout(() => document.getElementById('model-search-input')?.focus(), 0);
+
+		resetView();
 	}}
 	closeFocus={false}
 >
@@ -326,28 +375,78 @@
 
 
 			<div class="px-3 mb-2 max-h-64 overflow-y-auto scrollbar-hidden group relative">
 				{#if tags}
-					<div class=" flex w-full sticky">
+					<div
+						class=" flex w-full sticky top-0 z-10 bg-white dark:bg-gray-850 overflow-x-auto scrollbar-none"
+						on:wheel={(e) => {
+							if (e.deltaY !== 0) {
+								e.preventDefault();
+								e.currentTarget.scrollLeft += e.deltaY;
+							}
+						}}
+					>
 						<div
-							class="flex gap-1 scrollbar-none overflow-x-auto w-fit text-center text-sm font-medium rounded-full bg-transparent px-1.5 pb-0.5"
+							class="flex gap-1 w-fit text-center text-sm font-medium rounded-full bg-transparent px-1.5 pb-0.5"
 							bind:this={tagsContainerElement}
 						>
 							<button
-								class="min-w-fit outline-none p-1.5 {selectedTag === ''
+								class="min-w-fit outline-none p-1.5 {selectedTag === '' &&
+								selectedConnectionType === ''
 									? ''
 									: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
 								on:click={() => {
+									selectedConnectionType = '';
 									selectedTag = '';
 								}}
 							>
 								{$i18n.t('All')}
 							</button>
 
 
+							{#if items.find((item) => item.model?.owned_by === 'ollama') && items.find((item) => item.model?.owned_by === 'openai')}
+								<button
+									class="min-w-fit outline-none p-1.5 {selectedConnectionType === 'ollama'
+										? ''
+										: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
+									on:click={() => {
+										selectedTag = '';
+										selectedConnectionType = 'ollama';
+									}}
+								>
+									{$i18n.t('Local')}
+								</button>
+								<button
+									class="min-w-fit outline-none p-1.5 {selectedConnectionType === 'openai'
+										? ''
+										: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
+									on:click={() => {
+										selectedTag = '';
+										selectedConnectionType = 'openai';
+									}}
+								>
+									{$i18n.t('External')}
+								</button>
+							{/if}
+
+							{#if items.find((item) => item.model?.direct)}
+								<button
+									class="min-w-fit outline-none p-1.5 {selectedConnectionType === 'direct'
+										? ''
+										: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
+									on:click={() => {
+										selectedTag = '';
+										selectedConnectionType = 'direct';
+									}}
+								>
+									{$i18n.t('Direct')}
+								</button>
+							{/if}
+
 							{#each tags as tag}
 							{#each tags as tag}
 								<button
 								<button
 									class="min-w-fit outline-none p-1.5 {selectedTag === tag
 									class="min-w-fit outline-none p-1.5 {selectedTag === tag
 										? ''
 										? ''
 										: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
 										: 'text-gray-300 dark:text-gray-600 hover:text-gray-700 dark:hover:text-white'} transition capitalize"
 									on:click={() => {
 									on:click={() => {
+										selectedConnectionType = '';
 										selectedTag = tag;
 										selectedTag = tag;
 									}}
 									}}
 								>
 								>
@@ -366,6 +465,7 @@
 							? 'bg-gray-100 dark:bg-gray-800 group-hover:bg-transparent'
 							? 'bg-gray-100 dark:bg-gray-800 group-hover:bg-transparent'
 							: ''}"
 							: ''}"
 						data-arrow-selected={index === selectedModelIdx}
 						data-arrow-selected={index === selectedModelIdx}
+						data-value={item.value}
 						on:click={() => {
 						on:click={() => {
 							value = item.value;
 							value = item.value;
 							selectedModelIdx = index;
 							selectedModelIdx = index;
@@ -374,9 +474,9 @@
 						}}
 						}}
 					>
 					>
 						<div class="flex flex-col">
 						<div class="flex flex-col">
-							{#if $mobile && (item?.model?.info?.meta?.tags ?? []).length > 0}
+							{#if $mobile && (item?.model?.tags ?? []).length > 0}
 								<div class="flex gap-0.5 self-start h-full mb-1.5 -translate-x-1">
 								<div class="flex gap-0.5 self-start h-full mb-1.5 -translate-x-1">
-									{#each item.model?.info?.meta.tags as tag}
+									{#each item.model?.tags.sort((a, b) => a.name.localeCompare(b.name)) as tag}
 										<div
 										<div
 											class=" text-xs font-bold px-1 rounded-sm uppercase line-clamp-1 bg-gray-500/20 text-gray-700 dark:text-gray-200"
 											class=" text-xs font-bold px-1 rounded-sm uppercase line-clamp-1 bg-gray-500/20 text-gray-700 dark:text-gray-200"
 										>
 										>
@@ -398,31 +498,37 @@
 													alt="Model"
 													alt="Model"
 													class="rounded-full size-5 flex items-center mr-2"
 													class="rounded-full size-5 flex items-center mr-2"
 												/>
 												/>
-												{item.label}
+
+												<div class="flex items-center line-clamp-1">
+													<div class="line-clamp-1">
+														{item.label}
+													</div>
+
+													{#if item.model.owned_by === 'ollama' && (item.model.ollama?.details?.parameter_size ?? '') !== ''}
+														<div class="flex ml-1 items-center translate-y-[0.5px]">
+															<Tooltip
+																content={`${
+																	item.model.ollama?.details?.quantization_level
+																		? item.model.ollama?.details?.quantization_level + ' '
+																		: ''
+																}${
+																	item.model.ollama?.size
+																		? `(${(item.model.ollama?.size / 1024 ** 3).toFixed(1)}GB)`
+																		: ''
+																}`}
+																className="self-end"
+															>
+																<span
+																	class=" text-xs font-medium text-gray-600 dark:text-gray-400 line-clamp-1"
+																	>{item.model.ollama?.details?.parameter_size ?? ''}</span
+																>
+															</Tooltip>
+														</div>
+													{/if}
+												</div>
 											</Tooltip>
 											</Tooltip>
 										</div>
 										</div>
 									</div>
 									</div>
-									{#if item.model.owned_by === 'ollama' && (item.model.ollama?.details?.parameter_size ?? '') !== ''}
-										<div class="flex ml-1 items-center translate-y-[0.5px]">
-											<Tooltip
-												content={`${
-													item.model.ollama?.details?.quantization_level
-														? item.model.ollama?.details?.quantization_level + ' '
-														: ''
-												}${
-													item.model.ollama?.size
-														? `(${(item.model.ollama?.size / 1024 ** 3).toFixed(1)}GB)`
-														: ''
-												}`}
-												className="self-end"
-											>
-												<span
-													class=" text-xs font-medium text-gray-600 dark:text-gray-400 line-clamp-1"
-													>{item.model.ollama?.details?.parameter_size ?? ''}</span
-												>
-											</Tooltip>
-										</div>
-									{/if}
 								</div>
 								</div>
 
 
 								<!-- {JSON.stringify(item.info)} -->
 								<!-- {JSON.stringify(item.info)} -->
@@ -496,11 +602,11 @@
 									</Tooltip>
 									</Tooltip>
 								{/if}
 								{/if}
 
 
-								{#if !$mobile && (item?.model?.info?.meta?.tags ?? []).length > 0}
+								{#if !$mobile && (item?.model?.tags ?? []).length > 0}
 									<div
 									<div
 										class="flex gap-0.5 self-center items-center h-full translate-y-[0.5px] overflow-x-auto scrollbar-none"
 										class="flex gap-0.5 self-center items-center h-full translate-y-[0.5px] overflow-x-auto scrollbar-none"
 									>
 									>
-										{#each item.model?.info?.meta.tags as tag}
+										{#each item.model?.tags.sort((a, b) => a.name.localeCompare(b.name)) as tag}
 											<Tooltip content={tag.name} className="flex-shrink-0">
 											<Tooltip content={tag.name} className="flex-shrink-0">
 												<div
 												<div
 													class=" text-xs font-bold px-1 rounded-sm uppercase bg-gray-500/20 text-gray-700 dark:text-gray-200"
 													class=" text-xs font-bold px-1 rounded-sm uppercase bg-gray-500/20 text-gray-700 dark:text-gray-200"

+ 13 - 29
src/lib/components/chat/Navbar.svelte

@@ -114,37 +114,21 @@
 							</div>
 							</div>
 						</button>
 						</button>
 					</Menu>
 					</Menu>
-				{:else if $mobile && ($user.role === 'admin' || $user?.permissions?.chat?.controls)}
-					<Tooltip content={$i18n.t('Controls')}>
-						<button
-							class=" flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
-							on:click={async () => {
-								await showControls.set(!$showControls);
-							}}
-							aria-label="Controls"
-						>
-							<div class=" m-auto self-center">
-								<AdjustmentsHorizontal className=" size-5" strokeWidth="0.5" />
-							</div>
-						</button>
-					</Tooltip>
 				{/if}
 				{/if}
 
 
-				{#if !$mobile && ($user.role === 'admin' || $user?.permissions?.chat?.controls)}
-					<Tooltip content={$i18n.t('Controls')}>
-						<button
-							class=" flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
-							on:click={async () => {
-								await showControls.set(!$showControls);
-							}}
-							aria-label="Controls"
-						>
-							<div class=" m-auto self-center">
-								<AdjustmentsHorizontal className=" size-5" strokeWidth="0.5" />
-							</div>
-						</button>
-					</Tooltip>
-				{/if}
+				<Tooltip content={$i18n.t('Controls')}>
+					<button
+						class=" flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
+						on:click={async () => {
+							await showControls.set(!$showControls);
+						}}
+						aria-label="Controls"
+					>
+						<div class=" m-auto self-center">
+							<AdjustmentsHorizontal className=" size-5" strokeWidth="0.5" />
+						</div>
+					</button>
+				</Tooltip>
 
 
 				<Tooltip content={$i18n.t('New Chat')}>
 				<Tooltip content={$i18n.t('New Chat')}>
 					<button
 					<button

+ 1 - 0
src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte

@@ -961,6 +961,7 @@
 			<div class="flex w-full justify-between">
 			<div class="flex w-full justify-between">
 				<div class=" self-center text-xs font-medium">
 				<div class=" self-center text-xs font-medium">
 					{$i18n.t('Context Length')}
 					{$i18n.t('Context Length')}
+					{$i18n.t('(Ollama)')}
 				</div>
 				</div>
 
 
 				<button
 				<button

+ 13 - 1
src/lib/components/chat/Settings/Connections/Connection.svelte

@@ -6,6 +6,7 @@
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import Cog6 from '$lib/components/icons/Cog6.svelte';
 	import Cog6 from '$lib/components/icons/Cog6.svelte';
 	import AddConnectionModal from '$lib/components/AddConnectionModal.svelte';
 	import AddConnectionModal from '$lib/components/AddConnectionModal.svelte';
+	import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 
 	export let onDelete = () => {};
 	export let onDelete = () => {};
 	export let onSubmit = () => {};
 	export let onSubmit = () => {};
@@ -17,6 +18,7 @@
 	export let config = {};
 	export let config = {};
 
 
 	let showConfigModal = false;
 	let showConfigModal = false;
+	let showDeleteConfirmDialog = false;
 </script>
 </script>
 
 
 <AddConnectionModal
 <AddConnectionModal
@@ -28,7 +30,9 @@
 		key,
 		key,
 		config
 		config
 	}}
 	}}
-	{onDelete}
+	onDelete={() => {
+		showDeleteConfirmDialog = true;
+	}}
 	onSubmit={(connection) => {
 	onSubmit={(connection) => {
 		url = connection.url;
 		url = connection.url;
 		key = connection.key;
 		key = connection.key;
@@ -37,6 +41,14 @@
 	}}
 	}}
 />
 />
 
 
+<ConfirmDialog
+	bind:show={showDeleteConfirmDialog}
+	on:confirm={() => {
+		onDelete();
+		showConfigModal = false;
+	}}
+/>
+
 <div class="flex w-full gap-2 items-center">
 <div class="flex w-full gap-2 items-center">
 	<Tooltip
 	<Tooltip
 		className="w-full relative"
 		className="w-full relative"

+ 56 - 0
src/lib/components/chat/Settings/Interface.svelte

@@ -39,6 +39,9 @@
 	let chatDirection: 'LTR' | 'RTL' = 'LTR';
 	let chatDirection: 'LTR' | 'RTL' = 'LTR';
 	let ctrlEnterToSend = false;
 	let ctrlEnterToSend = false;
 
 
+	let collapseCodeBlocks = false;
+	let expandDetails = false;
+
 	let imageCompression = false;
 	let imageCompression = false;
 	let imageCompressionSize = {
 	let imageCompressionSize = {
 		width: '',
 		width: '',
@@ -55,6 +58,16 @@
 
 
 	let webSearch = null;
 	let webSearch = null;
 
 
+	const toggleExpandDetails = () => {
+		expandDetails = !expandDetails;
+		saveSettings({ expandDetails });
+	};
+
+	const toggleCollapseCodeBlocks = () => {
+		collapseCodeBlocks = !collapseCodeBlocks;
+		saveSettings({ collapseCodeBlocks });
+	};
+
 	const toggleSplitLargeChunks = async () => {
 	const toggleSplitLargeChunks = async () => {
 		splitLargeChunks = !splitLargeChunks;
 		splitLargeChunks = !splitLargeChunks;
 		saveSettings({ splitLargeChunks: splitLargeChunks });
 		saveSettings({ splitLargeChunks: splitLargeChunks });
@@ -227,6 +240,9 @@
 		richTextInput = $settings.richTextInput ?? true;
 		richTextInput = $settings.richTextInput ?? true;
 		largeTextAsFile = $settings.largeTextAsFile ?? false;
 		largeTextAsFile = $settings.largeTextAsFile ?? false;
 
 
+		collapseCodeBlocks = $settings.collapseCodeBlocks ?? false;
+		expandDetails = $settings.expandDetails ?? false;
+
 		landingPageMode = $settings.landingPageMode ?? '';
 		landingPageMode = $settings.landingPageMode ?? '';
 		chatBubble = $settings.chatBubble ?? true;
 		chatBubble = $settings.chatBubble ?? true;
 		widescreenMode = $settings.widescreenMode ?? false;
 		widescreenMode = $settings.widescreenMode ?? false;
@@ -570,6 +586,46 @@
 				</div>
 				</div>
 			</div>
 			</div>
 
 
+			<div>
+				<div class=" py-0.5 flex w-full justify-between">
+					<div class=" self-center text-xs">{$i18n.t('Always Collapse Code Blocks')}</div>
+
+					<button
+						class="p-1 px-3 text-xs flex rounded-sm transition"
+						on:click={() => {
+							toggleCollapseCodeBlocks();
+						}}
+						type="button"
+					>
+						{#if collapseCodeBlocks === true}
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
+						{:else}
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
+						{/if}
+					</button>
+				</div>
+			</div>
+
+			<div>
+				<div class=" py-0.5 flex w-full justify-between">
+					<div class=" self-center text-xs">{$i18n.t('Always Expand Details')}</div>
+
+					<button
+						class="p-1 px-3 text-xs flex rounded-sm transition"
+						on:click={() => {
+							toggleExpandDetails();
+						}}
+						type="button"
+					>
+						{#if expandDetails === true}
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
+						{:else}
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
+						{/if}
+					</button>
+				</div>
+			</div>
+
 			<div>
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
 				<div class=" py-0.5 flex w-full justify-between">
 					<div class=" self-center text-xs">
 					<div class=" self-center text-xs">

+ 0 - 1
src/lib/components/chat/SettingsModal.svelte

@@ -15,7 +15,6 @@
 	import Chats from './Settings/Chats.svelte';
 	import Chats from './Settings/Chats.svelte';
 	import User from '../icons/User.svelte';
 	import User from '../icons/User.svelte';
 	import Personalization from './Settings/Personalization.svelte';
 	import Personalization from './Settings/Personalization.svelte';
-	import SearchInput from '../layout/Sidebar/SearchInput.svelte';
 	import Search from '../icons/Search.svelte';
 	import Search from '../icons/Search.svelte';
 	import Connections from './Settings/Connections.svelte';
 	import Connections from './Settings/Connections.svelte';
 
 

+ 4 - 3
src/lib/components/chat/Suggestions.svelte

@@ -45,9 +45,10 @@
 		if (inputValue.length > 500) {
 		if (inputValue.length > 500) {
 			filteredPrompts = [];
 			filteredPrompts = [];
 		} else {
 		} else {
-			const newFilteredPrompts = inputValue.trim()
-				? fuse.search(inputValue.trim()).map((result) => result.item)
-				: sortedPrompts;
+			const newFilteredPrompts =
+				inputValue.trim() && fuse
+					? fuse.search(inputValue.trim()).map((result) => result.item)
+					: sortedPrompts;
 
 
 			// Compare with the oldFilteredPrompts
 			// Compare with the oldFilteredPrompts
 			// If there's a difference, update array + version
 			// If there's a difference, update array + version

+ 7 - 1
src/lib/components/common/Checkbox.svelte

@@ -4,6 +4,7 @@
 
 
 	export let state = 'unchecked';
 	export let state = 'unchecked';
 	export let indeterminate = false;
 	export let indeterminate = false;
+	export let disabled = false;
 
 
 	let _state = 'unchecked';
 	let _state = 'unchecked';
 
 
@@ -14,8 +15,12 @@
 	class=" outline -outline-offset-1 outline-[1.5px] outline-gray-200 dark:outline-gray-600 {state !==
 	class=" outline -outline-offset-1 outline-[1.5px] outline-gray-200 dark:outline-gray-600 {state !==
 	'unchecked'
 	'unchecked'
 		? 'bg-black outline-black '
 		? 'bg-black outline-black '
-		: 'hover:outline-gray-500 hover:bg-gray-50 dark:hover:bg-gray-800'} text-white transition-all rounded-sm inline-block w-3.5 h-3.5 relative"
+		: 'hover:outline-gray-500 hover:bg-gray-50 dark:hover:bg-gray-800'} text-white transition-all rounded-sm inline-block w-3.5 h-3.5 relative {disabled
+		? 'opacity-50 cursor-not-allowed'
+		: ''}"
 	on:click={() => {
 	on:click={() => {
+		if (disabled) return;
+
 		if (_state === 'unchecked') {
 		if (_state === 'unchecked') {
 			_state = 'checked';
 			_state = 'checked';
 			dispatch('change', _state);
 			dispatch('change', _state);
@@ -30,6 +35,7 @@
 		}
 		}
 	}}
 	}}
 	type="button"
 	type="button"
+	{disabled}
 >
 >
 	<div class="top-0 left-0 absolute w-full flex justify-center">
 	<div class="top-0 left-0 absolute w-full flex justify-center">
 		{#if _state === 'checked'}
 		{#if _state === 'checked'}

+ 38 - 4
src/lib/components/common/CodeEditor.svelte

@@ -33,15 +33,49 @@
 
 
 	const updateValue = () => {
 	const updateValue = () => {
 		if (_value !== value) {
 		if (_value !== value) {
+			const changes = findChanges(_value, value);
 			_value = value;
 			_value = value;
-			if (codeEditor) {
-				codeEditor.dispatch({
-					changes: [{ from: 0, to: codeEditor.state.doc.length, insert: _value }]
-				});
+
+			if (codeEditor && changes.length > 0) {
+				codeEditor.dispatch({ changes });
 			}
 			}
 		}
 		}
 	};
 	};
 
 
+	/**
+	 * Finds multiple diffs in two strings and generates minimal change edits.
+	 */
+	function findChanges(oldStr, newStr) {
+		let changes = [];
+		let oldIndex = 0,
+			newIndex = 0;
+
+		while (oldIndex < oldStr.length || newIndex < newStr.length) {
+			if (oldStr[oldIndex] !== newStr[newIndex]) {
+				let start = oldIndex;
+
+				// Identify the changed portion
+				while (oldIndex < oldStr.length && oldStr[oldIndex] !== newStr[newIndex]) {
+					oldIndex++;
+				}
+				while (newIndex < newStr.length && newStr[newIndex] !== oldStr[start]) {
+					newIndex++;
+				}
+
+				changes.push({
+					from: start,
+					to: oldIndex, // Replace the differing part
+					insert: newStr.substring(start, newIndex)
+				});
+			} else {
+				oldIndex++;
+				newIndex++;
+			}
+		}
+
+		return changes;
+	}
+
 	export let id = '';
 	export let id = '';
 	export let lang = '';
 	export let lang = '';
 
 

+ 7 - 3
src/lib/components/common/FileItem.svelte

@@ -82,7 +82,7 @@
 	{#if !small}
 	{#if !small}
 		<div class="flex flex-col justify-center -space-y-0.5 px-2.5 w-full">
 		<div class="flex flex-col justify-center -space-y-0.5 px-2.5 w-full">
 			<div class=" dark:text-gray-100 text-sm font-medium line-clamp-1 mb-1">
 			<div class=" dark:text-gray-100 text-sm font-medium line-clamp-1 mb-1">
-				{name}
+				{decodeURIComponent(name)}
 			</div>
 			</div>
 
 
 			<div class=" flex justify-between text-gray-500 text-xs line-clamp-1">
 			<div class=" flex justify-between text-gray-500 text-xs line-clamp-1">
@@ -101,7 +101,11 @@
 			</div>
 			</div>
 		</div>
 		</div>
 	{:else}
 	{:else}
-		<Tooltip content={name} className="flex flex-col w-full" placement="top-start">
+		<Tooltip
+			content={decodeURIComponent(name)}
+			className="flex flex-col w-full"
+			placement="top-start"
+		>
 			<div class="flex flex-col justify-center -space-y-0.5 px-2.5 w-full">
 			<div class="flex flex-col justify-center -space-y-0.5 px-2.5 w-full">
 				<div class=" dark:text-gray-100 text-sm flex justify-between items-center">
 				<div class=" dark:text-gray-100 text-sm flex justify-between items-center">
 					{#if loading}
 					{#if loading}
@@ -109,7 +113,7 @@
 							<Spinner className="size-4" />
 							<Spinner className="size-4" />
 						</div>
 						</div>
 					{/if}
 					{/if}
-					<div class="font-medium line-clamp-1 flex-1">{name}</div>
+					<div class="font-medium line-clamp-1 flex-1">{decodeURIComponent(name)}</div>
 					<div class="text-gray-500 text-xs capitalize shrink-0">{formatFileSize(size)}</div>
 					<div class="text-gray-500 text-xs capitalize shrink-0">{formatFileSize(size)}</div>
 				</div>
 				</div>
 			</div>
 			</div>

+ 6 - 2
src/lib/components/common/FileItemModal.svelte

@@ -87,8 +87,12 @@
 						<div>
 						<div>
 							<Tooltip
 							<Tooltip
 								content={enableFullContent
 								content={enableFullContent
-									? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
-									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+									? $i18n.t(
+											'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
+										)
+									: $i18n.t(
+											'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'
+										)}
 							>
 							>
 								<div class="flex items-center gap-1.5 text-xs">
 								<div class="flex items-center gap-1.5 text-xs">
 									{#if enableFullContent}
 									{#if enableFullContent}

+ 36 - 4
src/lib/components/common/ImagePreview.svelte

@@ -1,5 +1,6 @@
 <script lang="ts">
 <script lang="ts">
 	import { onDestroy, onMount } from 'svelte';
 	import { onDestroy, onMount } from 'svelte';
+	import panzoom, { type PanZoom } from 'panzoom';
 
 
 	export let show = false;
 	export let show = false;
 	export let src = '';
 	export let src = '';
@@ -9,6 +10,25 @@
 
 
 	let previewElement = null;
 	let previewElement = null;
 
 
+	let instance: PanZoom;
+
+	let sceneParentElement: HTMLElement;
+	let sceneElement: HTMLElement;
+
+	$: if (sceneElement) {
+		instance = panzoom(sceneElement, {
+			bounds: true,
+			boundsPadding: 0.1,
+
+			zoomSpeed: 0.065
+		});
+	}
+	const resetPanZoomViewport = () => {
+		instance.moveTo(0, 0);
+		instance.zoomAbs(0, 0, 1);
+		console.log(instance.getTransform());
+	};
+
 	const downloadImage = (url, filename, prefixName = '') => {
 	const downloadImage = (url, filename, prefixName = '') => {
 		fetch(url)
 		fetch(url)
 			.then((response) => response.blob())
 			.then((response) => response.blob())
@@ -62,11 +82,16 @@
 		bind:this={previewElement}
 		bind:this={previewElement}
 		class="modal fixed top-0 right-0 left-0 bottom-0 bg-black text-white w-full min-h-screen h-screen flex justify-center z-9999 overflow-hidden overscroll-contain"
 		class="modal fixed top-0 right-0 left-0 bottom-0 bg-black text-white w-full min-h-screen h-screen flex justify-center z-9999 overflow-hidden overscroll-contain"
 	>
 	>
-		<div class=" absolute left-0 w-full flex justify-between select-none">
+		<div class=" absolute left-0 w-full flex justify-between select-none z-10">
 			<div>
 			<div>
 				<button
 				<button
 					class=" p-5"
 					class=" p-5"
-					on:click={() => {
+					on:pointerdown={(e) => {
+						e.stopImmediatePropagation();
+						e.preventDefault();
+						show = false;
+					}}
+					on:click={(e) => {
 						show = false;
 						show = false;
 					}}
 					}}
 				>
 				>
@@ -86,7 +111,12 @@
 			<div>
 			<div>
 				<button
 				<button
 					class=" p-5"
 					class=" p-5"
-					on:click={() => {
+					on:pointerdown={(e) => {
+						e.stopImmediatePropagation();
+						e.preventDefault();
+						downloadImage(src, src.substring(src.lastIndexOf('/') + 1), alt);
+					}}
+					on:click={(e) => {
 						downloadImage(src, src.substring(src.lastIndexOf('/') + 1), alt);
 						downloadImage(src, src.substring(src.lastIndexOf('/') + 1), alt);
 					}}
 					}}
 				>
 				>
@@ -106,6 +136,8 @@
 				</button>
 				</button>
 			</div>
 			</div>
 		</div>
 		</div>
-		<img {src} {alt} class=" mx-auto h-full object-scale-down select-none" draggable="false" />
+		<div bind:this={sceneElement} class="flex h-full max-h-full justify-center items-center">
+			<img {src} {alt} class=" mx-auto h-full object-scale-down select-none" draggable="false" />
+		</div>
 	</div>
 	</div>
 {/if}
 {/if}

+ 12 - 1
src/lib/components/common/Valves.svelte

@@ -80,7 +80,7 @@
 									/>
 									/>
 								</div>
 								</div>
 							</div>
 							</div>
-						{:else}
+						{:else if (valvesSpec.properties[property]?.type ?? null) !== 'string'}
 							<input
 							<input
 								class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-hidden border border-gray-100 dark:border-gray-850"
 								class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-hidden border border-gray-100 dark:border-gray-850"
 								type="text"
 								type="text"
@@ -92,6 +92,17 @@
 									dispatch('change');
 									dispatch('change');
 								}}
 								}}
 							/>
 							/>
+						{:else}
+							<textarea
+								class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-hidden border border-gray-100 dark:border-gray-850"
+								placeholder={valvesSpec.properties[property].title}
+								bind:value={valves[property]}
+								autocomplete="off"
+								required
+								on:change={() => {
+									dispatch('change');
+								}}
+							/>
 						{/if}
 						{/if}
 					</div>
 					</div>
 				</div>
 				</div>

+ 73 - 20
src/lib/components/layout/Navbar/Menu.svelte

@@ -6,6 +6,9 @@
 	import fileSaver from 'file-saver';
 	import fileSaver from 'file-saver';
 	const { saveAs } = fileSaver;
 	const { saveAs } = fileSaver;
 
 
+	import jsPDF from 'jspdf';
+	import html2canvas from 'html2canvas-pro';
+
 	import { downloadChatAsPDF } from '$lib/apis/utils';
 	import { downloadChatAsPDF } from '$lib/apis/utils';
 	import { copyToClipboard, createMessagesList } from '$lib/utils';
 	import { copyToClipboard, createMessagesList } from '$lib/utils';
 
 
@@ -14,7 +17,8 @@
 		showControls,
 		showControls,
 		showArtifacts,
 		showArtifacts,
 		mobile,
 		mobile,
-		temporaryChatEnabled
+		temporaryChatEnabled,
+		theme
 	} from '$lib/stores';
 	} from '$lib/stores';
 	import { flyAndScale } from '$lib/utils/transitions';
 	import { flyAndScale } from '$lib/utils/transitions';
 
 
@@ -58,27 +62,76 @@
 	};
 	};
 
 
 	const downloadPdf = async () => {
 	const downloadPdf = async () => {
-		const history = chat.chat.history;
-		const messages = createMessagesList(history, history.currentId);
-		const blob = await downloadChatAsPDF(localStorage.token, chat.chat.title, messages);
-
-		// Create a URL for the blob
-		const url = window.URL.createObjectURL(blob);
-
-		// Create a link element to trigger the download
-		const a = document.createElement('a');
-		a.href = url;
-		a.download = `chat-${chat.chat.title}.pdf`;
-
-		// Append the link to the body and click it programmatically
-		document.body.appendChild(a);
-		a.click();
+		const containerElement = document.getElementById('messages-container');
+
+		if (containerElement) {
+			try {
+				const isDarkMode = document.documentElement.classList.contains('dark');
+
+				console.log('isDarkMode', isDarkMode);
+
+				// Define a fixed virtual screen size
+				const virtualWidth = 800; // Fixed width (adjust as needed)
+				// Clone the container to avoid layout shifts
+				const clonedElement = containerElement.cloneNode(true);
+				clonedElement.classList.add('text-black');
+				clonedElement.classList.add('dark:text-white');
+				clonedElement.style.width = `${virtualWidth}px`; // Apply fixed width
+				clonedElement.style.height = 'auto'; // Allow content to expand
+
+				document.body.appendChild(clonedElement); // Temporarily add to DOM
+
+				// Render to canvas with predefined width
+				const canvas = await html2canvas(clonedElement, {
+					backgroundColor: isDarkMode ? '#000' : '#fff',
+					useCORS: true,
+					scale: 2, // Keep at 1x to avoid unexpected enlargements
+					width: virtualWidth, // Set fixed virtual screen width
+					windowWidth: virtualWidth // Ensure consistent rendering
+				});
+
+				document.body.removeChild(clonedElement); // Clean up temp element
+
+				const imgData = canvas.toDataURL('image/png');
+
+				// A4 page settings
+				const pdf = new jsPDF('p', 'mm', 'a4');
+				const imgWidth = 210; // A4 width in mm
+				const pageHeight = 297; // A4 height in mm
+
+				// Maintain aspect ratio
+				const imgHeight = (canvas.height * imgWidth) / canvas.width;
+				let heightLeft = imgHeight;
+				let position = 0;
+
+				// Set page background for dark mode
+				if (isDarkMode) {
+					pdf.setFillColor(0, 0, 0);
+					pdf.rect(0, 0, imgWidth, pageHeight, 'F'); // Apply black bg
+				}
+
+				pdf.addImage(imgData, 'PNG', 0, position, imgWidth, imgHeight);
+				heightLeft -= pageHeight;
+
+				// Handle additional pages
+				while (heightLeft > 0) {
+					position -= pageHeight;
+					pdf.addPage();
+
+					if (isDarkMode) {
+						pdf.setFillColor(0, 0, 0);
+						pdf.rect(0, 0, imgWidth, pageHeight, 'F');
+					}
 
-		// Remove the link from the body
-		document.body.removeChild(a);
+					pdf.addImage(imgData, 'PNG', 0, position, imgWidth, imgHeight);
+					heightLeft -= pageHeight;
+				}
 
-		// Revoke the URL to release memory
-		window.URL.revokeObjectURL(url);
+				pdf.save(`chat-${chat.chat.title}.pdf`);
+			} catch (error) {
+				console.error('Error generating PDF', error);
+			}
+		}
 	};
 
 	const downloadJSONExport = async () => {
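
The page-splitting arithmetic above is the part of this change that is easiest to get wrong, so here is a minimal standalone sketch of the same approach, assuming the jspdf and html2canvas-pro APIs used in the diff; the helper name elementToPdf, its options object and the default file name are illustrative and not part of the commit:

	import jsPDF from 'jspdf';
	import html2canvas from 'html2canvas-pro';

	// Render a DOM element to one tall canvas, then spread that image over as many
	// A4 pages as needed. The same image is drawn on every page with a negative
	// y-offset, so each page shows the next 297 mm slice of it.
	const elementToPdf = async (element, { darkMode = false, fileName = 'chat.pdf' } = {}) => {
		const canvas = await html2canvas(element, {
			backgroundColor: darkMode ? '#000' : '#fff',
			useCORS: true,
			scale: 2 // rasterize at 2x for sharper text
		});

		const pdf = new jsPDF('p', 'mm', 'a4');
		const pageWidth = 210; // A4 width in mm
		const pageHeight = 297; // A4 height in mm
		const imgHeight = (canvas.height * pageWidth) / canvas.width; // keep aspect ratio
		const imgData = canvas.toDataURL('image/png');

		let position = 0; // y-offset of the image on the current page
		let heightLeft = imgHeight;

		while (true) {
			if (darkMode) {
				pdf.setFillColor(0, 0, 0);
				pdf.rect(0, 0, pageWidth, pageHeight, 'F'); // black page background
			}
			pdf.addImage(imgData, 'PNG', 0, position, pageWidth, imgHeight);
			heightLeft -= pageHeight;
			if (heightLeft <= 0) break;
			position -= pageHeight; // shift the image up by one page height
			pdf.addPage();
		}

		pdf.save(fileName);
	};

Because the whole conversation is rendered as a single image, anything past the current page edge is clipped by jsPDF, which is why comparing heightLeft against the page height is all the loop needs.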

+ 8 - 0
src/lib/components/layout/Sidebar.svelte

@@ -77,6 +77,7 @@
 	let allChatsLoaded = false;
 
 	let folders = {};
+	let newFolderId = null;
 
 	const initFolders = async () => {
 		const folderList = await getFolders(localStorage.token).catch((error) => {
@@ -90,6 +91,11 @@
 		for (const folder of folderList) {
 			// Ensure folder is added to folders with its data
 			folders[folder.id] = { ...(folders[folder.id] || {}), ...folder };
+
+			if (newFolderId && folder.id === newFolderId) {
+				folders[folder.id].new = true;
+				newFolderId = null;
+			}
 		}
 
 		// Second pass: Tie child folders to their parents
@@ -150,6 +156,7 @@
 		});
 
 		if (res) {
+			newFolderId = res.id;
 			await initFolders();
 		}
 	};
@@ -611,6 +618,7 @@
 				bind:value={search}
 				on:input={searchDebounceHandler}
 				placeholder={$i18n.t('Search')}
+				showClearButton={true}
 			/>
 		</div>
 

+ 14 - 0
src/lib/components/layout/Sidebar/ChatItem.svelte

@@ -198,6 +198,19 @@
 	});
 
 	let showDeleteConfirm = false;
+
+	const chatTitleInputKeydownHandler = (e) => {
+		if (e.key === 'Enter') {
+			e.preventDefault();
+			editChatTitle(id, chatTitle);
+			confirmEdit = false;
+			chatTitle = '';
+		} else if (e.key === 'Escape') {
+			e.preventDefault();
+			confirmEdit = false;
+			chatTitle = '';
+		}
+	};
 </script>
 
 <ShareChatModal bind:show={showShareChatModal} chatId={id} />
@@ -246,6 +259,7 @@
 				bind:value={chatTitle}
 				id="chat-title-input-{id}"
 				class=" bg-transparent w-full outline-hidden mr-10"
+				on:keydown={chatTitleInputKeydownHandler}
 			/>
 		</div>
 	{:else}

+ 73 - 25
src/lib/components/layout/Sidebar/ChatMenu.svelte

@@ -6,6 +6,9 @@
 	import fileSaver from 'file-saver';
 	const { saveAs } = fileSaver;
 
+	import jsPDF from 'jspdf';
+	import html2canvas from 'html2canvas-pro';
+
 	const dispatch = createEventDispatcher();
 
 	import Dropdown from '$lib/components/common/Dropdown.svelte';
@@ -23,7 +26,7 @@
 		getChatPinnedStatusById,
 		toggleChatPinnedStatusById
 	} from '$lib/apis/chats';
-	import { chats } from '$lib/stores';
+	import { chats, theme } from '$lib/stores';
 	import { createMessagesList } from '$lib/utils';
 	import { downloadChatAsPDF } from '$lib/apis/utils';
 	import Download from '$lib/components/icons/Download.svelte';
@@ -77,31 +80,76 @@
 
 	const downloadPdf = async () => {
 		const chat = await getChatById(localStorage.token, chatId);
-		if (!chat) {
-			return;
-		}
-
-		const history = chat.chat.history;
-		const messages = createMessagesList(history, history.currentId);
-		const blob = await downloadChatAsPDF(localStorage.token, chat.chat.title, messages);
-
-		// Create a URL for the blob
-		const url = window.URL.createObjectURL(blob);
-
-		// Create a link element to trigger the download
-		const a = document.createElement('a');
-		a.href = url;
-		a.download = `chat-${chat.chat.title}.pdf`;
 
-		// Append the link to the body and click it programmatically
-		document.body.appendChild(a);
-		a.click();
-
-		// Remove the link from the body
-		document.body.removeChild(a);
-
-		// Revoke the URL to release memory
-		window.URL.revokeObjectURL(url);
+		const containerElement = document.getElementById('messages-container');
+
+		if (containerElement) {
+			try {
+				const isDarkMode = $theme.includes('dark'); // Check theme mode
+
+				// Define a fixed virtual screen size
+				const virtualWidth = 1024; // Fixed width (adjust as needed)
+				const virtualHeight = 1400; // Fixed height (adjust as needed)
+
+				// Clone the container to avoid layout shifts
+				const clonedElement = containerElement.cloneNode(true);
+				clonedElement.style.width = `${virtualWidth}px`; // Apply fixed width
+				clonedElement.style.height = 'auto'; // Allow content to expand
+
+				document.body.appendChild(clonedElement); // Temporarily add to DOM
+
+				// Render to canvas with predefined width
+				const canvas = await html2canvas(clonedElement, {
+					backgroundColor: isDarkMode ? '#000' : '#fff',
+					useCORS: true,
+					scale: 2, // Render at 2x for sharper output
+					width: virtualWidth, // Set fixed virtual screen width
+					windowWidth: virtualWidth, // Ensure consistent rendering
+					windowHeight: virtualHeight
+				});
+
+				document.body.removeChild(clonedElement); // Clean up temp element
+
+				const imgData = canvas.toDataURL('image/png');
+
+				// A4 page settings
+				const pdf = new jsPDF('p', 'mm', 'a4');
+				const imgWidth = 210; // A4 width in mm
+				const pageHeight = 297; // A4 height in mm
+
+				// Maintain aspect ratio
+				const imgHeight = (canvas.height * imgWidth) / canvas.width;
+				let heightLeft = imgHeight;
+				let position = 0;
+
+				// Set page background for dark mode
+				if (isDarkMode) {
+					pdf.setFillColor(0, 0, 0);
+					pdf.rect(0, 0, imgWidth, pageHeight, 'F'); // Apply black bg
+				}
+
+				pdf.addImage(imgData, 'PNG', 0, position, imgWidth, imgHeight);
+				heightLeft -= pageHeight;
+
+				// Handle additional pages
+				while (heightLeft > 0) {
+					position -= pageHeight;
+					pdf.addPage();
+
+					if (isDarkMode) {
+						pdf.setFillColor(0, 0, 0);
+						pdf.rect(0, 0, imgWidth, pageHeight, 'F');
+					}
+
+					pdf.addImage(imgData, 'PNG', 0, position, imgWidth, imgHeight);
+					heightLeft -= pageHeight;
+				}
+
+				pdf.save(`chat-${chat.chat.title}.pdf`);
+			} catch (error) {
+				console.error('Error generating PDF', error);
+			}
+		}
 	};
 
 	const downloadJSONExport = async () => {
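
Note that the two downloadPdf implementations in this commit detect dark mode differently: this menu version checks the theme store with $theme.includes('dark'), while the earlier one reads the class applied to the document element. A tiny sketch of the class-based check follows (the helper name is illustrative); it reports what is currently applied to the page rather than the raw store value:

	// Illustrative helper: the 'dark' class on <html> reflects the theme that is
	// actually applied to the rendered page.
	const isDarkMode = () => document.documentElement.classList.contains('dark');

Either check feeds the same backgroundColor and page-fill logic shown above.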

+ 20 - 7
src/lib/components/layout/Sidebar/RecursiveFolder.svelte

@@ -201,7 +201,7 @@
 		dragged = false;
 	};
 
-	onMount(() => {
+	onMount(async () => {
 		open = folders[folderId].is_expanded;
 		if (folderElement) {
 			folderElement.addEventListener('dragover', onDragOver);
@@ -215,6 +215,13 @@
 			// Event listener for when dragging ends
 			folderElement.addEventListener('dragend', onDragEnd);
 		}
+
+		if (folders[folderId]?.new) {
+			delete folders[folderId].new;
+
+			await tick();
+			editHandler();
+		}
 	});
 
 	onDestroy(() => {
@@ -297,15 +304,15 @@
 		console.log('Edit');
 		await tick();
 		name = folders[folderId].name;
-		edit = true;
 
+		edit = true;
 		await tick();
 
-		// focus on the input
-		setTimeout(() => {
-			const input = document.getElementById(`folder-${folderId}-input`);
+		const input = document.getElementById(`folder-${folderId}-input`);
+
+		if (input) {
 			input.focus();
-		}, 100);
+		}
 	};
 
 	const exportHandler = async () => {
@@ -394,6 +401,9 @@
 							id="folder-{folderId}-input"
 							type="text"
 							bind:value={name}
+							on:focus={(e) => {
+								e.target.select();
+							}}
 							on:blur={() => {
 								nameUpdateHandler();
 								edit = false;
@@ -427,7 +437,10 @@
 				>
 					<FolderMenu
 						on:rename={() => {
-							editHandler();
+							// Requires a timeout to prevent the click event from closing the dropdown
+							setTimeout(() => {
+								editHandler();
+							}, 200);
 						}}
 						on:delete={() => {
 							showDeleteConfirm = true;
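
Together with the Sidebar.svelte changes earlier in this commit, the create-and-rename flow now works roughly as follows. This is a condensed sketch: the handler name createFolderHandler and the createFolder call are assumptions filled in around the res/initFolders lines visible in the diff, while the onMount block mirrors the code above:

	// Sidebar.svelte: remember the id returned by the create request, then flag
	// that folder as `new` while the folder map is rebuilt in initFolders().
	const createFolderHandler = async (name) => {
		const res = await createFolder(localStorage.token, name); // assumed API helper
		if (res) {
			newFolderId = res.id;
			await initFolders(); // sets folders[res.id].new = true and clears newFolderId
		}
	};

	// RecursiveFolder.svelte: when a freshly created folder mounts, consume the flag
	// and immediately open the inline rename input.
	onMount(async () => {
		if (folders[folderId]?.new) {
			delete folders[folderId].new;
			await tick(); // let the folder render before touching the DOM
			editHandler(); // focuses folder-{folderId}-input; its on:focus selects the text
		}
	});

The 200 ms setTimeout around editHandler in the dropdown's rename handler exists only because, as the inline comment notes, the click would otherwise close the dropdown before the input takes focus; the mount path above does not need it.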

+ 19 - 1
src/lib/components/layout/Sidebar/SearchInput.svelte

@@ -3,12 +3,14 @@
 	import { tags } from '$lib/stores';
 	import { getContext, createEventDispatcher, onMount, onDestroy, tick } from 'svelte';
 	import { fade } from 'svelte/transition';
+	import XMark from '$lib/components/icons/XMark.svelte';
 
 	const dispatch = createEventDispatcher();
 	const i18n = getContext('i18n');
 
 	export let placeholder = '';
 	export let value = '';
+	export let showClearButton = false;
 
 	let selectedIdx = 0;
 
@@ -59,6 +61,11 @@
 		loading = false;
 	};
 
+	const clearSearchInput = () => {
+		value = '';
+		dispatch('input');
+	};
+
 	const documentClickHandler = (e) => {
 		const searchContainer = document.getElementById('search-container');
 		const chatSearch = document.getElementById('chat-search');
@@ -98,7 +105,7 @@
 		</div>
 
 		<input
-			class="w-full rounded-r-xl py-1.5 pl-2.5 pr-4 text-sm bg-transparent dark:text-gray-300 outline-hidden"
+			class="w-full rounded-r-xl py-1.5 pl-2.5 text-sm bg-transparent dark:text-gray-300 outline-hidden"
 			placeholder={placeholder ? placeholder : $i18n.t('Search')}
 			bind:value
 			on:input={() => {
@@ -140,6 +147,17 @@
 				}
 			}}
 		/>
+
+		{#if showClearButton && value}
+			<div class="self-center pr-2 pl-1.5 translate-y-[0.5px] rounded-l-xl bg-transparent">
+				<button
+					class="p-0.5 rounded-full hover:bg-gray-100 dark:hover:bg-gray-900 transition"
+					on:click={clearSearchInput}
+				>
+					<XMark className="size-3" strokeWidth="2" />
+				</button>
+			</div>
+		{/if}
 	</div>
 
 	{#if focused && (filteredOptions.length > 0 || filteredTags.length > 0)}

+ 18 - 2
src/lib/components/workspace/Knowledge/KnowledgeBase.svelte

@@ -9,7 +9,7 @@
 
 	import { goto } from '$app/navigation';
 	import { page } from '$app/stores';
-	import { mobile, showSidebar, knowledge as _knowledge } from '$lib/stores';
+	import { mobile, showSidebar, knowledge as _knowledge, config } from '$lib/stores';
 
 	import { updateFileDataContentById, uploadFile, deleteFileById } from '$lib/apis/files';
 	import {
@@ -131,6 +131,22 @@
 			return null;
 		}
 
+		if (
+			($config?.file?.max_size ?? null) !== null &&
+			file.size > ($config?.file?.max_size ?? 0) * 1024 * 1024
+		) {
+			console.log('File exceeds max size limit:', {
+				fileSize: file.size,
+				maxSize: ($config?.file?.max_size ?? 0) * 1024 * 1024
+			});
+			toast.error(
+				$i18n.t(`File size should not exceed {{maxSize}} MB.`, {
+					maxSize: $config?.file?.max_size
+				})
+			);
+			return;
+		}
+
 		knowledge.files = [...(knowledge.files ?? []), fileItem];
 
 		try {
@@ -681,7 +697,7 @@
 										href={selectedFile.id ? `/api/v1/files/${selectedFile.id}/content` : '#'}
 										target="_blank"
 									>
-										{selectedFile?.meta?.name}
+										{decodeURIComponent(selectedFile?.meta?.name)}
 									</a>
 								</div>
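
The size limit arrives from the backend config in megabytes while File.size is in bytes, so the guard above multiplies by 1024 * 1024 before comparing. A minimal reusable form of the same check (the helper name is illustrative, not part of the change):

	// True when a limit is configured and the file is larger than it.
	// maxSizeMB comes from $config?.file?.max_size and may be null/undefined (no limit).
	const exceedsMaxSize = (file, maxSizeMB) => maxSizeMB != null && file.size > maxSizeMB * 1024 * 1024;

	// usage, mirroring the diff:
	// if (exceedsMaxSize(file, $config?.file?.max_size)) {
	// 	toast.error($i18n.t('File size should not exceed {{maxSize}} MB.', { maxSize: $config?.file?.max_size }));
	// 	return;
	// }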
 
 

+ 10 - 3
src/lib/components/workspace/Models/FiltersSelector.svelte

@@ -39,10 +39,17 @@
 					<div class=" flex items-center gap-2 mr-3">
 						<div class="self-center flex items-center">
 							<Checkbox
-								state={_filters[filter].selected ? 'checked' : 'unchecked'}
+								state={_filters[filter].is_global
+									? 'checked'
+									: _filters[filter].selected
+										? 'checked'
+										: 'unchecked'}
+								disabled={_filters[filter].is_global}
 								on:change={(e) => {
-									_filters[filter].selected = e.detail === 'checked';
-									selectedFilterIds = Object.keys(_filters).filter((t) => _filters[t].selected);
+									if (!_filters[filter].is_global) {
+										_filters[filter].selected = e.detail === 'checked';
+										selectedFilterIds = Object.keys(_filters).filter((t) => _filters[t].selected);
+									}
 								}}
 							/>
 						</div>
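
The nested ternary above boils down to "checked whenever the filter is global or explicitly selected", with global filters also locked so they cannot be deselected per model. An equivalent, flatter formulation (illustrative only, not part of the change):

	// Global filters always render as checked and are not toggleable per model.
	const checkboxState = (filter) => (filter.is_global || filter.selected ? 'checked' : 'unchecked');
	const checkboxDisabled = (filter) => Boolean(filter.is_global);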

+ 2 - 2
src/lib/components/workspace/common/AccessControl.svelte

@@ -113,8 +113,8 @@
 						}
 					}}
 				>
-					<option class=" text-gray-700" value="private" selected>Private</option>
-					<option class=" text-gray-700" value="public" selected>Public</option>
+					<option class=" text-gray-700" value="private" selected>{$i18n.t('Private')}</option>
+					<option class=" text-gray-700" value="public" selected>{$i18n.t('Public')}</option>
 				</select>
 
 				<div class=" text-xs text-gray-400 font-medium">

+ 15 - 1
src/lib/i18n/locales/ar-BH/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api`)": "( `sh webui.sh --api`مثال)",
 	"(latest)": "(الأخير)",
+	"(Ollama)": "",
 	"{{ models }}": "{{ نماذج }}",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "هل تملك حساب ؟",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "",
 	"an assistant": "مساعد",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "الإفتراضي Prompt الاقتراحات",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "الإفتراضي صلاحيات المستخدم",
 	"Delete": "حذف",
 	"Delete a model": "حذف الموديل",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "",
 	"Description": "وصف",
 	"Didn't fully follow instructions": "لم أتبع التعليمات بشكل كامل",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "المستند",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "أدخل Chunk الحجم",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -471,6 +479,7 @@
 	"Export Prompts": "مطالبات التصدير",
 	"Export to CSV": "",
 	"Export Tools": "",
+	"External": "",
 	"External Models": "",
 	"Failed to add file.": "",
 	"Failed to create API Key.": "فشل في إنشاء مفتاح API.",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "قم بتضمين علامة `-api` عند تشغيل Stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "معلومات",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "إدخال الأوامر",
 	"Install from Github URL": "التثبيت من عنوان URL لجيثب",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "أخر 30 يوم",
 	"Previous 7 days": "أخر 7 أيام",
+	"Private": "",
 	"Profile Image": "صورة الملف الشخصي",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "موجه (على سبيل المثال: أخبرني بحقيقة ممتعة عن الإمبراطورية الرومانية)",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "مطالبات",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com \"{{searchValue}}\" أسحب من ",
 	"Pull a model from Ollama.com": "Ollama.com سحب الموديل من ",
 	"Query Generation Prompt": "",
@@ -979,6 +991,7 @@
 	"System": "النظام",
 	"System Instructions": "",
 	"System Prompt": "محادثة النظام",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "الثيم",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. شكرًا لك!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "",
 	"variable": "المتغير",
 	"variable to have them replaced with clipboard content.": "متغير لاستبدالها بمحتوى الحافظة.",
+	"Verify Connection": "",
 	"Version": "إصدار",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot upload an empty file.": "",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "",
 	"You have no archived conversations.": "لا تملك محادثات محفوظه",

+ 15 - 1
src/lib/i18n/locales/bg-BG/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(напр. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api`)": "(напр. `sh webui.sh --api`)",
 	"(latest)": "(последна)",
+	"(Ollama)": "",
 	"{{ models }}": "{{ models }}",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "{{COUNT}} Отговори",
@@ -68,6 +69,8 @@
 	"Already have an account?": "Вече имате акаунт?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Винаги",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "Невероятно",
 	"an assistant": "асистент",
 	"Analyzed": "Анализирано",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "Промпт Предложения по подразбиране",
 	"Default to 389 or 636 if TLS is enabled": "По подразбиране 389 или 636, ако TLS е активиран",
 	"Default to ALL": "По подразбиране за ВСИЧКИ",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Роля на потребителя по подразбиране",
 	"Delete": "Изтриване",
 	"Delete a model": "Изтриване на модел",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "Опишете вашата база от знания и цели",
 	"Description": "Описание",
 	"Didn't fully follow instructions": "Не следва напълно инструкциите",
+	"Direct": "",
 	"Direct Connections": "Директни връзки",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Директните връзки позволяват на потребителите да се свързват със собствени OpenAI съвместими API крайни точки.",
 	"Direct Connections settings updated": "Настройките за директни връзки са актуализирани",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "Потопете се в знанието",
 	"Do not install functions from sources you do not fully trust.": "Не инсталирайте функции от източници, на които не се доверявате напълно.",
 	"Do not install tools from sources you do not fully trust.": "Не инсталирайте инструменти от източници, на които не се доверявате напълно.",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "Документ",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Въведете размер на чънк",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "Въведете описание",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Въведете домейни, разделени със запетаи (напр. example.com,site.org)",
@@ -471,6 +479,7 @@
 	"Export Prompts": "Експортване на промптове",
 	"Export to CSV": "Експортиране в CSV",
 	"Export Tools": "Експортиране на инструменти",
+	"External": "",
 	"External Models": "Външни модели",
 	"Failed to add file.": "Неуспешно добавяне на файл.",
 	"Failed to create API Key.": "Неуспешно създаване на API ключ.",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Информация",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Въведете команди",
 	"Install from Github URL": "Инсталиране от URL адреса на Github",
 	"Instant Auto-Send After Voice Transcription": "Незабавно автоматично изпращане след гласова транскрипция",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "Наказание за присъствие",
 	"Previous 30 days": "Предишните 30 дни",
 	"Previous 7 days": "Предишните 7 дни",
+	"Private": "",
 	"Profile Image": "Профилна снимка",
 	"Prompt": "Промпт",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Промпт (напр. Кажи ми забавен факт за Римската империя)",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "Промптът е актуализиран успешно",
 	"Prompts": "Промптове",
 	"Prompts Access": "Достъп до промптове",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Извади \"{{searchValue}}\" от Ollama.com",
 	"Pull a model from Ollama.com": "Издърпайте модел от Ollama.com",
 	"Query Generation Prompt": "Промпт за генериране на запитвания",
@@ -979,6 +991,7 @@
 	"System": "Система",
 	"System Instructions": "Системни инструкции",
 	"System Prompt": "Системен Промпт",
+	"Tags": "",
 	"Tags Generation": "Генериране на тагове",
 	"Tags Generation Prompt": "Промпт за генериране на тагове",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "Тема",
 	"Thinking...": "Мисля...",
 	"This action cannot be undone. Do you wish to continue?": "Това действие не може да бъде отменено. Желаете ли да продължите?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Това гарантира, че ценните ви разговори се запазват сигурно във вашата бекенд база данни. Благодарим ви!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Това е експериментална функция, може да не работи според очакванията и подлежи на промяна по всяко време.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "Клапаните са актуализирани успешно",
 	"variable": "променлива",
 	"variable to have them replaced with clipboard content.": "променлива, за да бъдат заменени със съдържанието от клипборда.",
+	"Verify Connection": "",
 	"Version": "Версия",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Версия {{selectedVersion}} от {{totalVersions}}",
 	"View Replies": "Преглед на отговорите",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Можете да чатите с максимум {{maxCount}} файл(а) наведнъж.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Можете да персонализирате взаимодействията си с LLM-и, като добавите спомени чрез бутона 'Управление' по-долу, правейки ги по-полезни и съобразени с вас.",
 	"You cannot upload an empty file.": "Не можете да качите празен файл.",
-	"You do not have permission to access this feature.": "Нямате разрешение за достъп до тази функция.",
 	"You do not have permission to upload files": "Нямате разрешение да качвате файлове",
 	"You do not have permission to upload files.": "Нямате разрешение да качвате файлове.",
 	"You have no archived conversations.": "Нямате архивирани разговори.",

+ 15 - 1
src/lib/i18n/locales/bn-BD/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api`)": "(যেমন `sh webui.sh --api`)",
 	"(latest)": "(সর্বশেষ)",
+	"(Ollama)": "",
 	"{{ models }}": "{{ মডেল}}",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "আগে থেকেই একাউন্ট আছে?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "",
 	"an assistant": "একটা এসিস্ট্যান্ট",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "ডিফল্ট প্রম্পট সাজেশন",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "ইউজারের ডিফল্ট পদবি",
 	"Delete": "মুছে ফেলুন",
 	"Delete a model": "একটি মডেল মুছে ফেলুন",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "",
 	"Description": "বিবরণ",
 	"Didn't fully follow instructions": "ইনস্ট্রাকশন সম্পূর্ণ অনুসরণ করা হয়নি",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "ডকুমেন্ট",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "চাংক সাইজ লিখুন",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -471,6 +479,7 @@
 	"Export Prompts": "প্রম্পটগুলো একপোর্ট করুন",
 	"Export to CSV": "",
 	"Export Tools": "",
+	"External": "",
 	"External Models": "",
 	"Failed to add file.": "",
 	"Failed to create API Key.": "API Key তৈরি করা যায়নি।",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui চালু করার সময় `--api` ফ্ল্যাগ সংযুক্ত করুন",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "তথ্য",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "ইনপুট কমান্ডস",
 	"Install from Github URL": "Github URL থেকে ইনস্টল করুন",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "পূর্ব ৩০ দিন",
 	"Previous 7 days": "পূর্ব ৭ দিন",
+	"Private": "",
 	"Profile Image": "প্রোফাইল ইমেজ",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "প্রম্প্ট (উদাহরণস্বরূপ, আমি রোমান ইমপার্টের সম্পর্কে একটি উপস্থিতি জানতে বল)",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "প্রম্পটসমূহ",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com থেকে \"{{searchValue}}\" টানুন",
 	"Pull a model from Ollama.com": "Ollama.com থেকে একটি টেনে আনুন আনুন",
 	"Query Generation Prompt": "",
@@ -979,6 +991,7 @@
 	"System": "সিস্টেম",
 	"System Instructions": "",
 	"System Prompt": "সিস্টেম প্রম্পট",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "থিম",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "এটা নিশ্চিত করে যে, আপনার গুরুত্বপূর্ণ আলোচনা নিরাপদে আপনার ব্যাকএন্ড ডেটাবেজে সংরক্ষিত আছে। ধন্যবাদ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "",
 	"variable": "ভেরিয়েবল",
 	"variable to have them replaced with clipboard content.": "ক্লিপবোর্ডের কন্টেন্ট দিয়ে যেই ভেরিয়েবল রিপ্লেস করা যাবে।",
+	"Verify Connection": "",
 	"Version": "ভার্সন",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot upload an empty file.": "",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "",
 	"You have no archived conversations.": "আপনার কোনও আর্কাইভ করা কথোপকথন নেই।",

+ 16 - 2
src/lib/i18n/locales/ca-ES/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(p. ex. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(p. ex. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api`)": "(p. ex. `sh webui.sh --api`)",
 	"(e.g. `sh webui.sh --api`)": "(p. ex. `sh webui.sh --api`)",
 	"(latest)": "(últim)",
 	"(latest)": "(últim)",
+	"(Ollama)": "",
 	"{{ models }}": "{{ models }}",
 	"{{ models }}": "{{ models }}",
 	"{{COUNT}} hidden lines": "{{COUNT}} línies ocultes",
 	"{{COUNT}} hidden lines": "{{COUNT}} línies ocultes",
 	"{{COUNT}} Replies": "{{COUNT}} respostes",
 	"{{COUNT}} Replies": "{{COUNT}} respostes",
@@ -68,6 +69,8 @@
 	"Already have an account?": "Ja tens un compte?",
 	"Already have an account?": "Ja tens un compte?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternativa al top_p, i pretén garantir un equilibri de qualitat i varietat. El paràmetre p representa la probabilitat mínima que es consideri un token, en relació amb la probabilitat del token més probable. Per exemple, amb p=0,05 i el token més probable amb una probabilitat de 0,9, es filtren els logits amb un valor inferior a 0,045.",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternativa al top_p, i pretén garantir un equilibri de qualitat i varietat. El paràmetre p representa la probabilitat mínima que es consideri un token, en relació amb la probabilitat del token més probable. Per exemple, amb p=0,05 i el token més probable amb una probabilitat de 0,9, es filtren els logits amb un valor inferior a 0,045.",
 	"Always": "Sempre",
 	"Always": "Sempre",
+	"Always Collapse Code Blocks": "Reduir sempre els blocs de codi",
+	"Always Expand Details": "Expandir sempre els detalls",
 	"Amazing": "Al·lucinant",
 	"Amazing": "Al·lucinant",
 	"an assistant": "un assistent",
 	"an assistant": "un assistent",
 	"Analyzed": "Analitzat",
 	"Analyzed": "Analitzat",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "Suggeriments d'indicació per defecte",
 	"Default Prompt Suggestions": "Suggeriments d'indicació per defecte",
 	"Default to 389 or 636 if TLS is enabled": "Per defecte 389 o 636 si TLS està habilitat",
 	"Default to 389 or 636 if TLS is enabled": "Per defecte 389 o 636 si TLS està habilitat",
 	"Default to ALL": "Per defecte TOTS",
 	"Default to ALL": "Per defecte TOTS",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "Per defecte, Segmented Retrieval per a l'extracció de contingut rellevant, es recomana en la majoria dels casos.",
 	"Default User Role": "Rol d'usuari per defecte",
 	"Default User Role": "Rol d'usuari per defecte",
 	"Delete": "Eliminar",
 	"Delete": "Eliminar",
 	"Delete a model": "Eliminar un model",
 	"Delete a model": "Eliminar un model",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "Descriu la teva base de coneixement i objectius",
 	"Describe your knowledge base and objectives": "Descriu la teva base de coneixement i objectius",
 	"Description": "Descripció",
 	"Description": "Descripció",
 	"Didn't fully follow instructions": "No s'han seguit les instruccions completament",
 	"Didn't fully follow instructions": "No s'han seguit les instruccions completament",
+	"Direct": "Directe",
 	"Direct Connections": "Connexions directes",
 	"Direct Connections": "Connexions directes",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Les connexions directes permeten als usuaris connectar-se als seus propis endpoints d'API compatibles amb OpenAI.",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Les connexions directes permeten als usuaris connectar-se als seus propis endpoints d'API compatibles amb OpenAI.",
 	"Direct Connections settings updated": "Configuració de les connexions directes actualitzada",
 	"Direct Connections settings updated": "Configuració de les connexions directes actualitzada",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "Aprofundir en el coneixement",
 	"Dive into knowledge": "Aprofundir en el coneixement",
 	"Do not install functions from sources you do not fully trust.": "No instal·lis funcions de fonts en què no confiïs plenament.",
 	"Do not install functions from sources you do not fully trust.": "No instal·lis funcions de fonts en què no confiïs plenament.",
 	"Do not install tools from sources you do not fully trust.": "No instal·lis eines de fonts en què no confiïs plenament.",
 	"Do not install tools from sources you do not fully trust.": "No instal·lis eines de fonts en què no confiïs plenament.",
+	"Docling": "Docling",
+	"Docling Server URL required.": "La URL del servidor Docling és necessària",
 	"Document": "Document",
 	"Document": "Document",
 	"Document Intelligence": "Document Intelligence",
 	"Document Intelligence": "Document Intelligence",
 	"Document Intelligence endpoint and key required.": "Fa falta un punt de connexió i una clau per a Document Intelligence.",
 	"Document Intelligence endpoint and key required.": "Fa falta un punt de connexió i una clau per a Document Intelligence.",
@@ -359,7 +366,7 @@
 	"Embedding model set to \"{{embedding_model}}\"": "Model d'incrustació configurat a \"{{embedding_model}}\"",
 	"Embedding model set to \"{{embedding_model}}\"": "Model d'incrustació configurat a \"{{embedding_model}}\"",
 	"Enable API Key": "Activar la Clau API",
 	"Enable API Key": "Activar la Clau API",
 	"Enable autocomplete generation for chat messages": "Activar la generació automàtica per als missatges del xat",
 	"Enable autocomplete generation for chat messages": "Activar la generació automàtica per als missatges del xat",
-	"Enable Code Execution": "",
+	"Enable Code Execution": "Permetre l'execució de codi",
 	"Enable Code Interpreter": "Activar l'intèrpret de codi",
 	"Enable Code Interpreter": "Activar l'intèrpret de codi",
 	"Enable Community Sharing": "Activar l'ús compartit amb la comunitat",
 	"Enable Community Sharing": "Activar l'ús compartit amb la comunitat",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Introdueix la mida del bloc",
 	"Enter Chunk Size": "Introdueix la mida del bloc",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Introdueix parelles de \"token:valor de biaix\" separats per comes (exemple: 5432:100, 413:-100)",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Introdueix parelles de \"token:valor de biaix\" separats per comes (exemple: 5432:100, 413:-100)",
 	"Enter description": "Introdueix la descripció",
 	"Enter description": "Introdueix la descripció",
+	"Enter Docling Server URL": "Introdueix la URL del servidor Docling",
 	"Enter Document Intelligence Endpoint": "Introdueix el punt de connexió de Document Intelligence",
 	"Enter Document Intelligence Endpoint": "Introdueix el punt de connexió de Document Intelligence",
 	"Enter Document Intelligence Key": "Introdueix la clau de Document Intelligence",
 	"Enter Document Intelligence Key": "Introdueix la clau de Document Intelligence",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Introdueix els dominis separats per comes (p. ex. example.com,site.org)",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Introdueix els dominis separats per comes (p. ex. example.com,site.org)",
@@ -471,6 +479,7 @@
 	"Export Prompts": "Exportar les indicacions",
 	"Export Prompts": "Exportar les indicacions",
 	"Export to CSV": "Exportar a CSV",
 	"Export to CSV": "Exportar a CSV",
 	"Export Tools": "Exportar les eines",
 	"Export Tools": "Exportar les eines",
+	"External": "Extern",
 	"External Models": "Models externs",
 	"External Models": "Models externs",
 	"Failed to add file.": "No s'ha pogut afegir l'arxiu.",
 	"Failed to add file.": "No s'ha pogut afegir l'arxiu.",
 	"Failed to create API Key.": "No s'ha pogut crear la clau API.",
 	"Failed to create API Key.": "No s'ha pogut crear la clau API.",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible.",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible.",
 	"Info": "Informació",
 	"Info": "Informació",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "Injectar tot el contingut com a context per a un processament complet, això es recomana per a consultes complexes.",
 	"Input commands": "Entra comandes",
 	"Input commands": "Entra comandes",
 	"Install from Github URL": "Instal·lar des de l'URL de Github",
 	"Install from Github URL": "Instal·lar des de l'URL de Github",
 	"Instant Auto-Send After Voice Transcription": "Enviament automàtic després de la transcripció de veu",
 	"Instant Auto-Send After Voice Transcription": "Enviament automàtic després de la transcripció de veu",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "Penalització de presència",
 	"Presence Penalty": "Penalització de presència",
 	"Previous 30 days": "30 dies anteriors",
 	"Previous 30 days": "30 dies anteriors",
 	"Previous 7 days": "7 dies anteriors",
 	"Previous 7 days": "7 dies anteriors",
+	"Private": "Privat",
 	"Profile Image": "Imatge de perfil",
 	"Profile Image": "Imatge de perfil",
 	"Prompt": "Indicació",
 	"Prompt": "Indicació",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Indicació (p.ex. Digues-me quelcom divertit sobre l'Imperi Romà)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Indicació (p.ex. Digues-me quelcom divertit sobre l'Imperi Romà)",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "Indicació actualitzada correctament",
 	"Prompt updated successfully": "Indicació actualitzada correctament",
 	"Prompts": "Indicacions",
 	"Prompts": "Indicacions",
 	"Prompts Access": "Accés a les indicacions",
 	"Prompts Access": "Accés a les indicacions",
+	"Public": "Públic",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Obtenir \"{{searchValue}}\" de Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Obtenir \"{{searchValue}}\" de Ollama.com",
 	"Pull a model from Ollama.com": "Obtenir un model d'Ollama.com",
 	"Pull a model from Ollama.com": "Obtenir un model d'Ollama.com",
 	"Query Generation Prompt": "Indicació per a generació de consulta",
 	"Query Generation Prompt": "Indicació per a generació de consulta",
@@ -979,6 +991,7 @@
 	"System": "Sistema",
 	"System": "Sistema",
 	"System Instructions": "Instruccions de sistema",
 	"System Instructions": "Instruccions de sistema",
 	"System Prompt": "Indicació del Sistema",
 	"System Prompt": "Indicació del Sistema",
+	"Tags": "Etiquetes",
 	"Tags Generation": "Generació d'etiquetes",
 	"Tags Generation": "Generació d'etiquetes",
 	"Tags Generation Prompt": "Indicació per a la generació d'etiquetes",
 	"Tags Generation Prompt": "Indicació per a la generació d'etiquetes",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració.",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració.",
@@ -1009,6 +1022,7 @@
 	"Theme": "Tema",
 	"Theme": "Tema",
 	"Thinking...": "Pensant...",
 	"Thinking...": "Pensant...",
 	"This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?",
 	"This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "Aquest canal es va crear el dia {{createdAt}}. Aquest és el començament del canal {{channelName}}.",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes.",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "Valves actualitat correctament",
 	"Valves updated successfully": "Valves actualitat correctament",
 	"variable": "variable",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable per tenir-les reemplaçades amb el contingut del porta-retalls.",
 	"variable to have them replaced with clipboard content.": "variable per tenir-les reemplaçades amb el contingut del porta-retalls.",
+	"Verify Connection": "Verificar la connexió",
 	"Version": "Versió",
 	"Version": "Versió",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versió {{selectedVersion}} de {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versió {{selectedVersion}} de {{totalVersions}}",
 	"View Replies": "Veure les respostes",
 	"View Replies": "Veure les respostes",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Només pots xatejar amb un màxim de {{maxCount}} fitxers alhora.",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Només pots xatejar amb un màxim de {{maxCount}} fitxers alhora.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Pots personalitzar les teves interaccions amb els models de llenguatge afegint memòries mitjançant el botó 'Gestiona' que hi ha a continuació, fent-les més útils i adaptades a tu.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Pots personalitzar les teves interaccions amb els models de llenguatge afegint memòries mitjançant el botó 'Gestiona' que hi ha a continuació, fent-les més útils i adaptades a tu.",
 	"You cannot upload an empty file.": "No es pot pujar un ariux buit.",
 	"You cannot upload an empty file.": "No es pot pujar un ariux buit.",
-	"You do not have permission to access this feature.": "No tens permís per accedir a aquesta funcionalitat",
 	"You do not have permission to upload files": "No tens permisos per pujar arxius",
 	"You do not have permission to upload files": "No tens permisos per pujar arxius",
 	"You do not have permission to upload files.": "No tens permisos per pujar arxius.",
 	"You do not have permission to upload files.": "No tens permisos per pujar arxius.",
 	"You have no archived conversations.": "No tens converses arxivades.",
 	"You have no archived conversations.": "No tens converses arxivades.",

+ 15 - 1
src/lib/i18n/locales/ceb-PH/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api`)": "(pananglitan `sh webui.sh --api`)",
 	"(e.g. `sh webui.sh --api`)": "(pananglitan `sh webui.sh --api`)",
 	"(latest)": "",
 	"(latest)": "",
+	"(Ollama)": "",
 	"{{ models }}": "",
 	"{{ models }}": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "Naa na kay account ?",
 	"Already have an account?": "Naa na kay account ?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "",
 	"Amazing": "",
 	"an assistant": "usa ka katabang",
 	"an assistant": "usa ka katabang",
 	"Analyzed": "",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "Default nga prompt nga mga sugyot",
 	"Default Prompt Suggestions": "Default nga prompt nga mga sugyot",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Default nga Papel sa Gumagamit",
 	"Default User Role": "Default nga Papel sa Gumagamit",
 	"Delete": "",
 	"Delete": "",
 	"Delete a model": "Pagtangtang sa usa ka template",
 	"Delete a model": "Pagtangtang sa usa ka template",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "",
 	"Describe your knowledge base and objectives": "",
 	"Description": "Deskripsyon",
 	"Description": "Deskripsyon",
 	"Didn't fully follow instructions": "",
 	"Didn't fully follow instructions": "",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "",
 	"Dive into knowledge": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "Dokumento",
 	"Document": "Dokumento",
 	"Document Intelligence": "",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Isulod ang block size",
 	"Enter Chunk Size": "Isulod ang block size",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "",
 	"Enter description": "",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -471,6 +479,7 @@
 	"Export Prompts": "Export prompts",
 	"Export Prompts": "Export prompts",
 	"Export to CSV": "",
 	"Export to CSV": "",
 	"Export Tools": "",
 	"Export Tools": "",
+	"External": "",
 	"External Models": "",
 	"External Models": "",
 	"Failed to add file.": "",
 	"Failed to add file.": "",
 	"Failed to create API Key.": "",
 	"Failed to create API Key.": "",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Iapil ang `--api` nga bandila kung nagdagan nga stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Iapil ang `--api` nga bandila kung nagdagan nga stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Pagsulod sa input commands",
 	"Input commands": "Pagsulod sa input commands",
 	"Install from Github URL": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Mga aghat",
 	"Prompts": "Mga aghat",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "Pagkuha ug template gikan sa Ollama.com",
 	"Pull a model from Ollama.com": "Pagkuha ug template gikan sa Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -979,6 +991,7 @@
 	"System": "Sistema",
 	"System": "Sistema",
 	"System Instructions": "",
 	"System Instructions": "",
 	"System Prompt": "Madasig nga Sistema",
 	"System Prompt": "Madasig nga Sistema",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "Tema",
 	"Theme": "Tema",
 	"Thinking...": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Kini nagsiguro nga ang imong bililhon nga mga panag-istoryahanay luwas nga natipig sa imong backend database. ",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Kini nagsiguro nga ang imong bililhon nga mga panag-istoryahanay luwas nga natipig sa imong backend database. ",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "variable",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable aron pulihan kini sa mga sulud sa clipboard.",
 	"variable to have them replaced with clipboard content.": "variable aron pulihan kini sa mga sulud sa clipboard.",
+	"Verify Connection": "",
 	"Version": "Bersyon",
 	"Version": "Bersyon",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot upload an empty file.": "",
 	"You cannot upload an empty file.": "",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "",
 	"You do not have permission to upload files.": "",
 	"You have no archived conversations.": "",
 	"You have no archived conversations.": "",

+ 15 - 1
src/lib/i18n/locales/cs-CZ/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(např. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(např. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api`)": "(např. `sh webui.sh --api`)",
 	"(e.g. `sh webui.sh --api`)": "(např. `sh webui.sh --api`)",
 	"(latest)": "Nejnovější",
 	"(latest)": "Nejnovější",
+	"(Ollama)": "",
 	"{{ models }}": "{{ models }}",
 	"{{ models }}": "{{ models }}",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "Už máte účet?",
 	"Already have an account?": "Už máte účet?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "",
 	"Amazing": "",
 	"an assistant": "asistent",
 	"an assistant": "asistent",
 	"Analyzed": "",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "Výchozí návrhy promptů",
 	"Default Prompt Suggestions": "Výchozí návrhy promptů",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Výchozí uživatelská role",
 	"Default User Role": "Výchozí uživatelská role",
 	"Delete": "Smazat",
 	"Delete": "Smazat",
 	"Delete a model": "Odstranit model.",
 	"Delete a model": "Odstranit model.",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "",
 	"Describe your knowledge base and objectives": "",
 	"Description": "Popis",
 	"Description": "Popis",
 	"Didn't fully follow instructions": "Nenásledovali jste přesně všechny instrukce.",
 	"Didn't fully follow instructions": "Nenásledovali jste přesně všechny instrukce.",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "",
 	"Dive into knowledge": "",
 	"Do not install functions from sources you do not fully trust.": "Neinstalujte funkce ze zdrojů, kterým plně nedůvěřujete.",
 	"Do not install functions from sources you do not fully trust.": "Neinstalujte funkce ze zdrojů, kterým plně nedůvěřujete.",
 	"Do not install tools from sources you do not fully trust.": "Neinstalujte nástroje ze zdrojů, kterým plně nedůvěřujete.",
 	"Do not install tools from sources you do not fully trust.": "Neinstalujte nástroje ze zdrojů, kterým plně nedůvěřujete.",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "Dokument",
 	"Document": "Dokument",
 	"Document Intelligence": "",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Zadejte velikost bloku",
 	"Enter Chunk Size": "Zadejte velikost bloku",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "Zadejte popis",
 	"Enter description": "Zadejte popis",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -471,6 +479,7 @@
 	"Export Prompts": "Exportovat prompty",
 	"Export Prompts": "Exportovat prompty",
 	"Export to CSV": "",
 	"Export to CSV": "",
 	"Export Tools": "Exportní nástroje",
 	"Export Tools": "Exportní nástroje",
+	"External": "",
 	"External Models": "Externí modely",
 	"External Models": "Externí modely",
 	"Failed to add file.": "Nepodařilo se přidat soubor.",
 	"Failed to add file.": "Nepodařilo se přidat soubor.",
 	"Failed to create API Key.": "Nepodařilo se vytvořit API klíč.",
 	"Failed to create API Key.": "Nepodařilo se vytvořit API klíč.",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Při spuštění stable-diffusion-webui zahrňte příznak `--api`.",
 	"Include `--api` flag when running stable-diffusion-webui": "Při spuštění stable-diffusion-webui zahrňte příznak `--api`.",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Vstupní příkazy",
 	"Input commands": "Vstupní příkazy",
 	"Install from Github URL": "Instalace z URL adresy Githubu",
 	"Install from Github URL": "Instalace z URL adresy Githubu",
 	"Instant Auto-Send After Voice Transcription": "Okamžité automatické odeslání po přepisu hlasu",
 	"Instant Auto-Send After Voice Transcription": "Okamžité automatické odeslání po přepisu hlasu",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Předchozích 30 dnů",
 	"Previous 30 days": "Předchozích 30 dnů",
 	"Previous 7 days": "Předchozích 7 dní",
 	"Previous 7 days": "Předchozích 7 dní",
+	"Private": "",
 	"Profile Image": "Profilový obrázek",
 	"Profile Image": "Profilový obrázek",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (např. Řekni mi zábavný fakt o Římské říši)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (např. Řekni mi zábavný fakt o Římské říši)",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Prompty",
 	"Prompts": "Prompty",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Stáhněte \"{{searchValue}}\" z Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Stáhněte \"{{searchValue}}\" z Ollama.com",
 	"Pull a model from Ollama.com": "Stáhněte model z Ollama.com",
 	"Pull a model from Ollama.com": "Stáhněte model z Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -979,6 +991,7 @@
 	"System": "System",
 	"System": "System",
 	"System Instructions": "",
 	"System Instructions": "",
 	"System Prompt": "Systémový prompt",
 	"System Prompt": "Systémový prompt",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Prompt pro generování značek",
 	"Tags Generation Prompt": "Prompt pro generování značek",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "Téma",
 	"Theme": "Téma",
 	"Thinking...": "Přemýšlím...",
 	"Thinking...": "Přemýšlím...",
 	"This action cannot be undone. Do you wish to continue?": "Tuto akci nelze vrátit zpět. Přejete si pokračovat?",
 	"This action cannot be undone. Do you wish to continue?": "Tuto akci nelze vrátit zpět. Přejete si pokračovat?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To zajišťuje, že vaše cenné konverzace jsou bezpečně uloženy ve vaší backendové databázi. Děkujeme!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To zajišťuje, že vaše cenné konverzace jsou bezpečně uloženy ve vaší backendové databázi. Děkujeme!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Jedná se o experimentální funkci, nemusí fungovat podle očekávání a může být kdykoliv změněna.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Jedná se o experimentální funkci, nemusí fungovat podle očekávání a může být kdykoliv změněna.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "Ventily byly úspěšně aktualizovány.",
 	"Valves updated successfully": "Ventily byly úspěšně aktualizovány.",
 	"variable": "proměnná",
 	"variable": "proměnná",
 	"variable to have them replaced with clipboard content.": "proměnnou, aby byl jejich obsah nahrazen obsahem schránky.",
 	"variable to have them replaced with clipboard content.": "proměnnou, aby byl jejich obsah nahrazen obsahem schránky.",
+	"Verify Connection": "",
 	"Version": "Verze",
 	"Version": "Verze",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Verze {{selectedVersion}} z {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Verze {{selectedVersion}} z {{totalVersions}}",
 	"View Replies": "",
 	"View Replies": "",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Můžete komunikovat pouze s maximálně {{maxCount}} soubor(y) najednou.",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Můžete komunikovat pouze s maximálně {{maxCount}} soubor(y) najednou.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Můžete personalizovat své interakce s LLM pomocí přidávání vzpomínek prostřednictvím tlačítka 'Spravovat' níže, což je učiní pro vás užitečnějšími a lépe přizpůsobenými.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Můžete personalizovat své interakce s LLM pomocí přidávání vzpomínek prostřednictvím tlačítka 'Spravovat' níže, což je učiní pro vás užitečnějšími a lépe přizpůsobenými.",
 	"You cannot upload an empty file.": "Nemůžete nahrát prázdný soubor.",
 	"You cannot upload an empty file.": "Nemůžete nahrát prázdný soubor.",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "",
 	"You do not have permission to upload files.": "",
 	"You have no archived conversations.": "Nemáte žádné archivované konverzace.",
 	"You have no archived conversations.": "Nemáte žádné archivované konverzace.",

+ 15 - 1
src/lib/i18n/locales/da-DK/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(f.eks. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(f.eks. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api`)": "(f.eks. `sh webui.sh --api`)",
 	"(e.g. `sh webui.sh --api`)": "(f.eks. `sh webui.sh --api`)",
 	"(latest)": "(seneste)",
 	"(latest)": "(seneste)",
+	"(Ollama)": "",
 	"{{ models }}": "{{ modeller }}",
 	"{{ models }}": "{{ modeller }}",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "Har du allerede en profil?",
 	"Already have an account?": "Har du allerede en profil?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "",
 	"Amazing": "",
 	"an assistant": "en assistent",
 	"an assistant": "en assistent",
 	"Analyzed": "",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "Standardforslag til prompt",
 	"Default Prompt Suggestions": "Standardforslag til prompt",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Brugers rolle som standard",
 	"Default User Role": "Brugers rolle som standard",
 	"Delete": "Slet",
 	"Delete": "Slet",
 	"Delete a model": "Slet en model",
 	"Delete a model": "Slet en model",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "",
 	"Describe your knowledge base and objectives": "",
 	"Description": "Beskrivelse",
 	"Description": "Beskrivelse",
 	"Didn't fully follow instructions": "Fulgte ikke instruktioner",
 	"Didn't fully follow instructions": "Fulgte ikke instruktioner",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "",
 	"Dive into knowledge": "",
 	"Do not install functions from sources you do not fully trust.": "Lad være med at installere funktioner fra kilder, som du ikke stoler på.",
 	"Do not install functions from sources you do not fully trust.": "Lad være med at installere funktioner fra kilder, som du ikke stoler på.",
 	"Do not install tools from sources you do not fully trust.": "Lad være med at installere værktøjer fra kilder, som du ikke stoler på.",
 	"Do not install tools from sources you do not fully trust.": "Lad være med at installere værktøjer fra kilder, som du ikke stoler på.",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "Dokument",
 	"Document": "Dokument",
 	"Document Intelligence": "",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Indtast størrelse af tekststykker",
 	"Enter Chunk Size": "Indtast størrelse af tekststykker",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "",
 	"Enter description": "",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -471,6 +479,7 @@
 	"Export Prompts": "Eksportér prompts",
 	"Export Prompts": "Eksportér prompts",
 	"Export to CSV": "",
 	"Export to CSV": "",
 	"Export Tools": "Eksportér værktøjer",
 	"Export Tools": "Eksportér værktøjer",
+	"External": "",
 	"External Models": "Eksterne modeller",
 	"External Models": "Eksterne modeller",
 	"Failed to add file.": "",
 	"Failed to add file.": "",
 	"Failed to create API Key.": "Kunne ikke oprette API-nøgle.",
 	"Failed to create API Key.": "Kunne ikke oprette API-nøgle.",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inkluder `--api` flag, når du kører stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inkluder `--api` flag, når du kører stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Inputkommandoer",
 	"Input commands": "Inputkommandoer",
 	"Install from Github URL": "Installer fra Github URL",
 	"Install from Github URL": "Installer fra Github URL",
 	"Instant Auto-Send After Voice Transcription": "Øjeblikkelig automatisk afsendelse efter stemmetransskription",
 	"Instant Auto-Send After Voice Transcription": "Øjeblikkelig automatisk afsendelse efter stemmetransskription",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Seneste 30 dage",
 	"Previous 30 days": "Seneste 30 dage",
 	"Previous 7 days": "Seneste 7 dage",
 	"Previous 7 days": "Seneste 7 dage",
+	"Private": "",
 	"Profile Image": "Profilbillede",
 	"Profile Image": "Profilbillede",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (f.eks. Fortæl mig en sjov kendsgerning om Romerriget)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (f.eks. Fortæl mig en sjov kendsgerning om Romerriget)",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Prompts",
 	"Prompts": "Prompts",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Hent \"{{searchValue}}\" fra Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Hent \"{{searchValue}}\" fra Ollama.com",
 	"Pull a model from Ollama.com": "Hent en model fra Ollama.com",
 	"Pull a model from Ollama.com": "Hent en model fra Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -979,6 +991,7 @@
 	"System": "System",
 	"System": "System",
 	"System Instructions": "",
 	"System Instructions": "",
 	"System Prompt": "Systemprompt",
 	"System Prompt": "Systemprompt",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "Tema",
 	"Theme": "Tema",
 	"Thinking...": "Tænker...",
 	"Thinking...": "Tænker...",
 	"This action cannot be undone. Do you wish to continue?": "Denne handling kan ikke fortrydes. Vil du fortsætte?",
 	"This action cannot be undone. Do you wish to continue?": "Denne handling kan ikke fortrydes. Vil du fortsætte?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer, at dine værdifulde samtaler gemmes sikkert i din backend-database. Tak!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer, at dine værdifulde samtaler gemmes sikkert i din backend-database. Tak!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentel funktion, den fungerer muligvis ikke som forventet og kan ændres når som helst.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentel funktion, den fungerer muligvis ikke som forventet og kan ændres når som helst.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "Ventiler opdateret.",
 	"Valves updated successfully": "Ventiler opdateret.",
 	"variable": "variabel",
 	"variable": "variabel",
 	"variable to have them replaced with clipboard content.": "variabel for at få dem erstattet med indholdet af udklipsholderen.",
 	"variable to have them replaced with clipboard content.": "variabel for at få dem erstattet med indholdet af udklipsholderen.",
+	"Verify Connection": "",
 	"Version": "Version",
 	"Version": "Version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} af {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} af {{totalVersions}}",
 	"View Replies": "",
 	"View Replies": "",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan kun chatte med maksimalt {{maxCount}} fil(er) ad gangen.",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan kun chatte med maksimalt {{maxCount}} fil(er) ad gangen.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan personliggøre dine interaktioner med LLM'er ved at tilføje minder via knappen 'Administrer' nedenfor, hvilket gør dem mere nyttige og skræddersyet til dig.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan personliggøre dine interaktioner med LLM'er ved at tilføje minder via knappen 'Administrer' nedenfor, hvilket gør dem mere nyttige og skræddersyet til dig.",
 	"You cannot upload an empty file.": "",
 	"You cannot upload an empty file.": "",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "",
 	"You do not have permission to upload files.": "",
 	"You have no archived conversations.": "Du har ingen arkiverede samtaler.",
 	"You have no archived conversations.": "Du har ingen arkiverede samtaler.",

+ 88 - 74
src/lib/i18n/locales/de-DE/translation.json

@@ -4,14 +4,15 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(z. B. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(z. B. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api`)": "(z. B. `sh webui.sh --api`)",
 	"(e.g. `sh webui.sh --api`)": "(z. B. `sh webui.sh --api`)",
 	"(latest)": "(neueste)",
 	"(latest)": "(neueste)",
+	"(Ollama)": "",
 	"{{ models }}": "{{ Modelle }}",
 	"{{ models }}": "{{ Modelle }}",
-	"{{COUNT}} hidden lines": "",
+	"{{COUNT}} hidden lines": "{{COUNT}} versteckte Zeilen",
 	"{{COUNT}} Replies": "{{COUNT}} Antworten",
 	"{{COUNT}} Replies": "{{COUNT}} Antworten",
-	"{{user}}'s Chats": "{{user}}s Unterhaltungen",
+	"{{user}}'s Chats": "{{user}}s Chats",
 	"{{webUIName}} Backend Required": "{{webUIName}}-Backend erforderlich",
 	"{{webUIName}} Backend Required": "{{webUIName}}-Backend erforderlich",
 	"*Prompt node ID(s) are required for image generation": "*Prompt-Node-ID(s) sind für die Bildgenerierung erforderlich",
 	"*Prompt node ID(s) are required for image generation": "*Prompt-Node-ID(s) sind für die Bildgenerierung erforderlich",
 	"A new version (v{{LATEST_VERSION}}) is now available.": "Eine neue Version (v{{LATEST_VERSION}}) ist jetzt verfügbar.",
 	"A new version (v{{LATEST_VERSION}}) is now available.": "Eine neue Version (v{{LATEST_VERSION}}) ist jetzt verfügbar.",
-	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Aufgabenmodelle können Unterhaltungstitel oder Websuchanfragen generieren.",
+	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Aufgabenmodelle können Chat-Titel oder Websuchanfragen generieren.",
 	"a user": "ein Benutzer",
 	"a user": "ein Benutzer",
 	"About": "Über",
 	"About": "Über",
 	"Accept autocomplete generation / Jump to prompt variable": "Automatische Vervollständigung akzeptieren / Zur Prompt-Variable springen",
 	"Accept autocomplete generation / Jump to prompt variable": "Automatische Vervollständigung akzeptieren / Zur Prompt-Variable springen",
@@ -47,27 +48,29 @@
 	"Adjusting these settings will apply changes universally to all users.": "Das Anpassen dieser Einstellungen wird Änderungen universell auf alle Benutzer anwenden.",
 	"Adjusting these settings will apply changes universally to all users.": "Das Anpassen dieser Einstellungen wird Änderungen universell auf alle Benutzer anwenden.",
 	"admin": "Administrator",
 	"admin": "Administrator",
 	"Admin": "Administrator",
 	"Admin": "Administrator",
-	"Admin Panel": "Administrationsbereich",
-	"Admin Settings": "Administrationsbereich",
+	"Admin Panel": "Administration",
+	"Admin Settings": "Administration",
 	"Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administratoren haben jederzeit Zugriff auf alle Werkzeuge. Benutzer können im Arbeitsbereich zugewiesen.",
 	"Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administratoren haben jederzeit Zugriff auf alle Werkzeuge. Benutzer können im Arbeitsbereich zugewiesen.",
 	"Advanced Parameters": "Erweiterte Parameter",
 	"Advanced Parameters": "Erweiterte Parameter",
 	"Advanced Params": "Erweiterte Parameter",
 	"Advanced Params": "Erweiterte Parameter",
-	"All": "",
+	"All": "Alle",
 	"All Documents": "Alle Dokumente",
 	"All Documents": "Alle Dokumente",
 	"All models deleted successfully": "Alle Modelle erfolgreich gelöscht",
 	"All models deleted successfully": "Alle Modelle erfolgreich gelöscht",
 	"Allow Chat Controls": "Chat-Steuerung erlauben",
 	"Allow Chat Controls": "Chat-Steuerung erlauben",
-	"Allow Chat Delete": "Löschen von Unterhaltungen erlauben",
-	"Allow Chat Deletion": "Löschen von Unterhaltungen erlauben",
-	"Allow Chat Edit": "Bearbeiten von Unterhaltungen erlauben",
+	"Allow Chat Delete": "Löschen von Chats erlauben",
+	"Allow Chat Deletion": "Löschen von Chats erlauben",
+	"Allow Chat Edit": "Bearbeiten von Chats erlauben",
 	"Allow File Upload": "Hochladen von Dateien erlauben",
 	"Allow File Upload": "Hochladen von Dateien erlauben",
 	"Allow non-local voices": "Nicht-lokale Stimmen erlauben",
 	"Allow non-local voices": "Nicht-lokale Stimmen erlauben",
-	"Allow Temporary Chat": "Temporäre Unterhaltungen erlauben",
+	"Allow Temporary Chat": "Temporäre Chats erlauben",
 	"Allow User Location": "Standort freigeben",
 	"Allow User Location": "Standort freigeben",
 	"Allow Voice Interruption in Call": "Unterbrechung durch Stimme im Anruf zulassen",
 	"Allow Voice Interruption in Call": "Unterbrechung durch Stimme im Anruf zulassen",
 	"Allowed Endpoints": "Erlaubte Endpunkte",
 	"Allowed Endpoints": "Erlaubte Endpunkte",
 	"Already have an account?": "Haben Sie bereits einen Account?",
 	"Already have an account?": "Haben Sie bereits einen Account?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Immer",
 	"Always": "Immer",
+	"Always Collapse Code Blocks": "Code-Blöcke immer zuklappen",
+	"Always Expand Details": "Details immer aufklappen",
 	"Amazing": "Fantastisch",
 	"Amazing": "Fantastisch",
 	"an assistant": "ein Assistent",
 	"an assistant": "ein Assistent",
 	"Analyzed": "Analysiert",
 	"Analyzed": "Analysiert",
@@ -85,17 +88,17 @@
 	"applies to all users with the \"user\" role": "gilt für alle Benutzer mit der Rolle \"Benutzer\"",
 	"applies to all users with the \"user\" role": "gilt für alle Benutzer mit der Rolle \"Benutzer\"",
 	"April": "April",
 	"April": "April",
 	"Archive": "Archivieren",
 	"Archive": "Archivieren",
-	"Archive All Chats": "Alle Unterhaltungen archivieren",
-	"Archived Chats": "Archivierte Unterhaltungen",
+	"Archive All Chats": "Alle Chats archivieren",
+	"Archived Chats": "Archivierte Chats",
 	"archived-chat-export": "archivierter-chat-export",
 	"archived-chat-export": "archivierter-chat-export",
 	"Are you sure you want to clear all memories? This action cannot be undone.": "Sind Sie sicher, dass Sie alle Erinnerungen löschen möchten? Diese Handlung kann nicht rückgängig gemacht werden.",
 	"Are you sure you want to clear all memories? This action cannot be undone.": "Sind Sie sicher, dass Sie alle Erinnerungen löschen möchten? Diese Handlung kann nicht rückgängig gemacht werden.",
 	"Are you sure you want to delete this channel?": "Sind Sie sicher, dass Sie diesen Kanal löschen möchten?",
 	"Are you sure you want to delete this channel?": "Sind Sie sicher, dass Sie diesen Kanal löschen möchten?",
 	"Are you sure you want to delete this message?": "Sind Sie sicher, dass Sie diese Nachricht löschen möchten?",
 	"Are you sure you want to delete this message?": "Sind Sie sicher, dass Sie diese Nachricht löschen möchten?",
-	"Are you sure you want to unarchive all archived chats?": "Sind Sie sicher, dass Sie alle archivierten Unterhaltungen wiederherstellen möchten?",
+	"Are you sure you want to unarchive all archived chats?": "Sind Sie sicher, dass Sie alle archivierten Chats wiederherstellen möchten?",
 	"Are you sure?": "Sind Sie sicher?",
 	"Are you sure?": "Sind Sie sicher?",
 	"Arena Models": "Arena-Modelle",
 	"Arena Models": "Arena-Modelle",
 	"Artifacts": "Artefakte",
 	"Artifacts": "Artefakte",
-	"Ask": "",
+	"Ask": "Fragen",
 	"Ask a question": "Stellen Sie eine Frage",
 	"Ask a question": "Stellen Sie eine Frage",
 	"Assistant": "Assistent",
 	"Assistant": "Assistent",
 	"Attach file from knowledge": "Datei aus Wissensspeicher anhängen",
 	"Attach file from knowledge": "Datei aus Wissensspeicher anhängen",
@@ -150,14 +153,14 @@
 	"Character limit for autocomplete generation input": "Zeichenlimit für die Eingabe der automatischen Vervollständigung",
 	"Character limit for autocomplete generation input": "Zeichenlimit für die Eingabe der automatischen Vervollständigung",
 	"Chart new frontiers": "Neue Wege beschreiten",
 	"Chart new frontiers": "Neue Wege beschreiten",
 	"Chat": "Gespräch",
 	"Chat": "Gespräch",
-	"Chat Background Image": "Hintergrundbild des Unterhaltungsfensters",
-	"Chat Bubble UI": "Chat Bubble UI",
+	"Chat Background Image": "Hintergrundbild des Chat-Fensters",
+	"Chat Bubble UI": "Sprechblasen-Layout",
 	"Chat Controls": "Chat-Steuerung",
 	"Chat Controls": "Chat-Steuerung",
 	"Chat direction": "Textrichtung",
 	"Chat direction": "Textrichtung",
-	"Chat Overview": "Unterhaltungsübersicht",
-	"Chat Permissions": "Unterhaltungsberechtigungen",
-	"Chat Tags Auto-Generation": "Automatische Generierung von Unterhaltungstags",
-	"Chats": "Unterhaltungen",
+	"Chat Overview": "Chat-Übersicht",
+	"Chat Permissions": "Chat-Berechtigungen",
+	"Chat Tags Auto-Generation": "Automatische Generierung von Chat-Tags",
+	"Chats": "Chats",
 	"Check Again": "Erneut überprüfen",
 	"Check Again": "Erneut überprüfen",
 	"Check for updates": "Nach Updates suchen",
 	"Check for updates": "Nach Updates suchen",
 	"Checking for updates...": "Sucht nach Updates...",
 	"Checking for updates...": "Sucht nach Updates...",
@@ -167,7 +170,7 @@
 	"Ciphers": "Verschlüsselungen",
 	"Ciphers": "Verschlüsselungen",
 	"Citation": "Zitate",
 	"Citation": "Zitate",
 	"Clear memory": "Alle Erinnerungen entfernen",
 	"Clear memory": "Alle Erinnerungen entfernen",
-	"Clear Memory": "",
+	"Clear Memory": "Alle Erinnerungen entfernen",
 	"click here": "hier klicken",
 	"click here": "hier klicken",
 	"Click here for filter guides.": "Klicken Sie hier für Filteranleitungen.",
 	"Click here for filter guides.": "Klicken Sie hier für Filteranleitungen.",
 	"Click here for help.": "Klicken Sie hier für Hilfe.",
 	"Click here for help.": "Klicken Sie hier für Hilfe.",
@@ -189,12 +192,12 @@
 	"Code execution": "Codeausführung",
 	"Code execution": "Codeausführung",
 	"Code Execution": "Codeausführung",
 	"Code Execution": "Codeausführung",
 	"Code Execution Engine": "",
 	"Code Execution Engine": "",
-	"Code Execution Timeout": "",
+	"Code Execution Timeout": "Timeout für Codeausführung",
 	"Code formatted successfully": "Code erfolgreich formatiert",
 	"Code formatted successfully": "Code erfolgreich formatiert",
 	"Code Interpreter": "Code-Interpreter",
 	"Code Interpreter": "Code-Interpreter",
 	"Code Interpreter Engine": "",
 	"Code Interpreter Engine": "",
 	"Code Interpreter Prompt Template": "",
 	"Code Interpreter Prompt Template": "",
-	"Collapse": "",
+	"Collapse": "Zuklappen",
 	"Collection": "Kollektion",
 	"Collection": "Kollektion",
 	"Color": "Farbe",
 	"Color": "Farbe",
 	"ComfyUI": "ComfyUI",
 	"ComfyUI": "ComfyUI",
@@ -250,7 +253,7 @@
 	"Created At": "Erstellt am",
 	"Created At": "Erstellt am",
 	"Created by": "Erstellt von",
 	"Created by": "Erstellt von",
 	"CSV Import": "CSV-Import",
 	"CSV Import": "CSV-Import",
-	"Ctrl+Enter to Send": "",
+	"Ctrl+Enter to Send": "Strg+Enter zum Senden",
 	"Current Model": "Aktuelles Modell",
 	"Current Model": "Aktuelles Modell",
 	"Current Password": "Aktuelles Passwort",
 	"Current Password": "Aktuelles Passwort",
 	"Custom": "Benutzerdefiniert",
 	"Custom": "Benutzerdefiniert",
@@ -270,18 +273,19 @@
 	"Default Prompt Suggestions": "Prompt-Vorschläge",
 	"Default Prompt Suggestions": "Prompt-Vorschläge",
 	"Default to 389 or 636 if TLS is enabled": "Standardmäßig auf 389 oder 636 setzen, wenn TLS aktiviert ist",
 	"Default to 389 or 636 if TLS is enabled": "Standardmäßig auf 389 oder 636 setzen, wenn TLS aktiviert ist",
 	"Default to ALL": "Standardmäßig auf ALLE setzen",
 	"Default to ALL": "Standardmäßig auf ALLE setzen",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Standardbenutzerrolle",
 	"Default User Role": "Standardbenutzerrolle",
 	"Delete": "Löschen",
 	"Delete": "Löschen",
 	"Delete a model": "Ein Modell löschen",
 	"Delete a model": "Ein Modell löschen",
-	"Delete All Chats": "Alle Unterhaltungen löschen",
+	"Delete All Chats": "Alle Chats löschen",
 	"Delete All Models": "Alle Modelle löschen",
 	"Delete All Models": "Alle Modelle löschen",
-	"Delete chat": "Unterhaltung löschen",
-	"Delete Chat": "Unterhaltung löschen",
-	"Delete chat?": "Unterhaltung löschen?",
+	"Delete chat": "Chat löschen",
+	"Delete Chat": "Chat löschen",
+	"Delete chat?": "Chat löschen?",
 	"Delete folder?": "Ordner löschen?",
 	"Delete folder?": "Ordner löschen?",
 	"Delete function?": "Funktion löschen?",
 	"Delete function?": "Funktion löschen?",
 	"Delete Message": "Nachricht löschen",
 	"Delete Message": "Nachricht löschen",
-	"Delete message?": "",
+	"Delete message?": "Nachricht löschen?",
 	"Delete prompt?": "Prompt löschen?",
 	"Delete prompt?": "Prompt löschen?",
 	"delete this link": "diesen Link löschen",
 	"delete this link": "diesen Link löschen",
 	"Delete tool?": "Werkzeug löschen?",
 	"Delete tool?": "Werkzeug löschen?",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "Beschreibe deinen Wissensspeicher und deine Ziele",
 	"Describe your knowledge base and objectives": "Beschreibe deinen Wissensspeicher und deine Ziele",
 	"Description": "Beschreibung",
 	"Description": "Beschreibung",
 	"Didn't fully follow instructions": "Nicht genau den Answeisungen gefolgt",
 	"Didn't fully follow instructions": "Nicht genau den Answeisungen gefolgt",
+	"Direct": "Direkt",
 	"Direct Connections": "Direktverbindungen",
 	"Direct Connections": "Direktverbindungen",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Direktverbindungen ermöglichen es Benutzern, sich mit ihren eigenen OpenAI-kompatiblen API-Endpunkten zu verbinden.",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Direktverbindungen ermöglichen es Benutzern, sich mit ihren eigenen OpenAI-kompatiblen API-Endpunkten zu verbinden.",
 	"Direct Connections settings updated": "Direktverbindungs-Einstellungen aktualisiert",
 	"Direct Connections settings updated": "Direktverbindungs-Einstellungen aktualisiert",
@@ -300,7 +305,7 @@
 	"Discover a model": "Entdecken Sie weitere Modelle",
 	"Discover a model": "Entdecken Sie weitere Modelle",
 	"Discover a prompt": "Entdecken Sie weitere Prompts",
 	"Discover a prompt": "Entdecken Sie weitere Prompts",
 	"Discover a tool": "Entdecken Sie weitere Werkzeuge",
 	"Discover a tool": "Entdecken Sie weitere Werkzeuge",
-	"Discover how to use Open WebUI and seek support from the community.": "",
+	"Discover how to use Open WebUI and seek support from the community.": "Entdecke, wie Sie Open WebUI nutzen und erhalten Sie Unterstützung von der Community.",
 	"Discover wonders": "Entdecken Sie Wunder",
 	"Discover wonders": "Entdecken Sie Wunder",
 	"Discover, download, and explore custom functions": "Entdecken und beziehen Sie benutzerdefinierte Funktionen",
 	"Discover, download, and explore custom functions": "Entdecken und beziehen Sie benutzerdefinierte Funktionen",
 	"Discover, download, and explore custom prompts": "Entdecken und beziehen Sie benutzerdefinierte Prompts",
 	"Discover, download, and explore custom prompts": "Entdecken und beziehen Sie benutzerdefinierte Prompts",
@@ -314,13 +319,15 @@
 	"Dive into knowledge": "Tauchen Sie in das Wissen ein",
 	"Dive into knowledge": "Tauchen Sie in das Wissen ein",
 	"Do not install functions from sources you do not fully trust.": "Installieren Sie keine Funktionen aus Quellen, denen Sie nicht vollständig vertrauen.",
 	"Do not install functions from sources you do not fully trust.": "Installieren Sie keine Funktionen aus Quellen, denen Sie nicht vollständig vertrauen.",
 	"Do not install tools from sources you do not fully trust.": "Installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vollständig vertrauen.",
 	"Do not install tools from sources you do not fully trust.": "Installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vollständig vertrauen.",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "Dokument",
 	"Document": "Dokument",
 	"Document Intelligence": "",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Dokumentation",
 	"Documentation": "Dokumentation",
 	"Documents": "Dokumente",
 	"Documents": "Dokumente",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "stellt keine externen Verbindungen her, und Ihre Daten bleiben sicher auf Ihrem lokal gehosteten Server.",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "stellt keine externen Verbindungen her, und Ihre Daten bleiben sicher auf Ihrem lokal gehosteten Server.",
-	"Domain Filter List": "",
+	"Domain Filter List": "Domain Filter-Liste",
 	"Don't have an account?": "Haben Sie noch kein Benutzerkonto?",
 	"Don't have an account?": "Haben Sie noch kein Benutzerkonto?",
 	"don't install random functions from sources you don't trust.": "installieren Sie keine Funktionen aus Quellen, denen Sie nicht vertrauen.",
 	"don't install random functions from sources you don't trust.": "installieren Sie keine Funktionen aus Quellen, denen Sie nicht vertrauen.",
 	"don't install random tools from sources you don't trust.": "installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vertrauen.",
 	"don't install random tools from sources you don't trust.": "installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vertrauen.",
@@ -332,7 +339,7 @@
 	"Download Database": "Datenbank exportieren",
 	"Download Database": "Datenbank exportieren",
 	"Drag and drop a file to upload or select a file to view": "Ziehen Sie eine Datei zum Hochladen oder wählen Sie eine Datei zum Anzeigen aus",
 	"Drag and drop a file to upload or select a file to view": "Ziehen Sie eine Datei zum Hochladen oder wählen Sie eine Datei zum Anzeigen aus",
 	"Draw": "Zeichnen",
 	"Draw": "Zeichnen",
-	"Drop any files here to add to the conversation": "Ziehen Sie beliebige Dateien hierher, um sie der Unterhaltung hinzuzufügen",
+	"Drop any files here to add to the conversation": "Ziehen Sie beliebige Dateien hierher, um sie dem Chat hinzuzufügen",
 	"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "z. B. '30s','10m'. Gültige Zeiteinheiten sind 's', 'm', 'h'.",
 	"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "z. B. '30s','10m'. Gültige Zeiteinheiten sind 's', 'm', 'h'.",
 	"e.g. 60": "z. B. 60",
 	"e.g. 60": "z. B. 60",
 	"e.g. A filter to remove profanity from text": "z. B. Ein Filter, um Schimpfwörter aus Text zu entfernen",
 	"e.g. A filter to remove profanity from text": "z. B. Ein Filter, um Schimpfwörter aus Text zu entfernen",
@@ -359,8 +366,8 @@
 	"Embedding model set to \"{{embedding_model}}\"": "Embedding-Modell auf \"{{embedding_model}}\" gesetzt",
 	"Embedding model set to \"{{embedding_model}}\"": "Embedding-Modell auf \"{{embedding_model}}\" gesetzt",
 	"Enable API Key": "API-Schlüssel aktivieren",
 	"Enable API Key": "API-Schlüssel aktivieren",
 	"Enable autocomplete generation for chat messages": "Automatische Vervollständigung für Chat-Nachrichten aktivieren",
 	"Enable autocomplete generation for chat messages": "Automatische Vervollständigung für Chat-Nachrichten aktivieren",
-	"Enable Code Execution": "",
-	"Enable Code Interpreter": "",
+	"Enable Code Execution": "Codeausführung aktivieren",
+	"Enable Code Interpreter": "Code-Interpreter aktivieren",
 	"Enable Community Sharing": "Community-Freigabe aktivieren",
 	"Enable Community Sharing": "Community-Freigabe aktivieren",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiviere Memory Locking (mlock), um zu verhindern, dass Modelldaten aus dem RAM ausgelagert werden. Diese Option sperrt die Arbeitsseiten des Modells im RAM, um sicherzustellen, dass sie nicht auf die Festplatte ausgelagert werden. Dies kann die Leistung verbessern, indem Page Faults vermieden und ein schneller Datenzugriff sichergestellt werden.",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiviere Memory Locking (mlock), um zu verhindern, dass Modelldaten aus dem RAM ausgelagert werden. Diese Option sperrt die Arbeitsseiten des Modells im RAM, um sicherzustellen, dass sie nicht auf die Festplatte ausgelagert werden. Dies kann die Leistung verbessern, indem Page Faults vermieden und ein schneller Datenzugriff sichergestellt werden.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiviere Memory Mapping (mmap), um Modelldaten zu laden. Diese Option ermöglicht es dem System, den Festplattenspeicher als Erweiterung des RAM zu verwenden, indem Festplattendateien so behandelt werden, als ob sie im RAM wären. Dies kann die Modellleistung verbessern, indem ein schnellerer Datenzugriff ermöglicht wird. Es kann jedoch nicht auf allen Systemen korrekt funktionieren und einen erheblichen Teil des Festplattenspeichers beanspruchen.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiviere Memory Mapping (mmap), um Modelldaten zu laden. Diese Option ermöglicht es dem System, den Festplattenspeicher als Erweiterung des RAM zu verwenden, indem Festplattendateien so behandelt werden, als ob sie im RAM wären. Dies kann die Modellleistung verbessern, indem ein schnellerer Datenzugriff ermöglicht wird. Es kann jedoch nicht auf allen Systemen korrekt funktionieren und einen erheblichen Teil des Festplattenspeichers beanspruchen.",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Geben Sie die Blockgröße ein",
 	"Enter Chunk Size": "Geben Sie die Blockgröße ein",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "Geben Sie eine Beschreibung ein",
 	"Enter description": "Geben Sie eine Beschreibung ein",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Geben Sie die Domains durch Kommas separiert ein (z.B. example.com,site.org)",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Geben Sie die Domains durch Kommas separiert ein (z.B. example.com,site.org)",
@@ -393,17 +401,17 @@
 	"Enter Google PSE Engine Id": "Geben Sie die Google PSE-Engine-ID ein",
 	"Enter Google PSE Engine Id": "Geben Sie die Google PSE-Engine-ID ein",
 	"Enter Image Size (e.g. 512x512)": "Geben Sie die Bildgröße ein (z. B. 512x512)",
 	"Enter Image Size (e.g. 512x512)": "Geben Sie die Bildgröße ein (z. B. 512x512)",
 	"Enter Jina API Key": "Geben Sie den Jina-API-Schlüssel ein",
 	"Enter Jina API Key": "Geben Sie den Jina-API-Schlüssel ein",
-	"Enter Jupyter Password": "",
-	"Enter Jupyter Token": "",
-	"Enter Jupyter URL": "",
-	"Enter Kagi Search API Key": "Geben sie den Kagi Search API-Schlüssel ein",
-	"Enter Key Behavior": "",
+	"Enter Jupyter Password": "Geben Sie das Jupyter-Passwort ein",
+	"Enter Jupyter Token": "Geben Sie den Jupyter-Token ein",
+	"Enter Jupyter URL": "Geben Sie die Jupyter-URL ein",
+	"Enter Kagi Search API Key": "Geben Sie den Kagi Search API-Schlüssel ein",
+	"Enter Key Behavior": "Verhalten von 'Enter'",
 	"Enter language codes": "Geben Sie die Sprachcodes ein",
 	"Enter language codes": "Geben Sie die Sprachcodes ein",
 	"Enter Model ID": "Geben Sie die Modell-ID ein",
 	"Enter Model ID": "Geben Sie die Modell-ID ein",
 	"Enter model tag (e.g. {{modelTag}})": "Geben Sie den Model-Tag ein",
 	"Enter model tag (e.g. {{modelTag}})": "Geben Sie den Model-Tag ein",
 	"Enter Mojeek Search API Key": "Geben Sie den Mojeek Search API-Schlüssel ein",
 	"Enter Mojeek Search API Key": "Geben Sie den Mojeek Search API-Schlüssel ein",
 	"Enter Number of Steps (e.g. 50)": "Geben Sie die Anzahl an Schritten ein (z. B. 50)",
 	"Enter Number of Steps (e.g. 50)": "Geben Sie die Anzahl an Schritten ein (z. B. 50)",
-	"Enter Perplexity API Key": "",
+	"Enter Perplexity API Key": "Geben Sie den Perplexity API-Key ein",
 	"Enter proxy URL (e.g. https://user:password@host:port)": "Geben sie die Proxy-URL ein (z. B. https://user:password@host:port)",
 	"Enter proxy URL (e.g. https://user:password@host:port)": "Geben sie die Proxy-URL ein (z. B. https://user:password@host:port)",
 	"Enter reasoning effort": "Geben Sie den Schlussfolgerungsaufwand ein",
 	"Enter reasoning effort": "Geben Sie den Schlussfolgerungsaufwand ein",
 	"Enter Sampler (e.g. Euler a)": "Geben Sie den Sampler ein (z. B. Euler a)",
 	"Enter Sampler (e.g. Euler a)": "Geben Sie den Sampler ein (z. B. Euler a)",
@@ -426,8 +434,8 @@
 	"Enter Tavily API Key": "Geben Sie den Tavily-API-Schlüssel ein",
 	"Enter Tavily API Key": "Geben Sie den Tavily-API-Schlüssel ein",
 	"Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Geben sie die öffentliche URL Ihrer WebUI ein. Diese URL wird verwendet, um Links in den Benachrichtigungen zu generieren.",
 	"Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Geben sie die öffentliche URL Ihrer WebUI ein. Diese URL wird verwendet, um Links in den Benachrichtigungen zu generieren.",
 	"Enter Tika Server URL": "Geben Sie die Tika-Server-URL ein",
 	"Enter Tika Server URL": "Geben Sie die Tika-Server-URL ein",
-	"Enter timeout in seconds": "",
-	"Enter to Send": "",
+	"Enter timeout in seconds": "Geben Sie den Timeout in Sekunden ein",
+	"Enter to Send": "'Enter' zum Senden",
 	"Enter Top K": "Geben Sie Top K ein",
 	"Enter Top K": "Geben Sie Top K ein",
 	"Enter Top K Reranker": "Geben Sie Top K für Reranker ein",
 	"Enter Top K Reranker": "Geben Sie Top K für Reranker ein",
 	"Enter URL (e.g. http://127.0.0.1:7860/)": "Geben Sie die URL ein (z. B. http://127.0.0.1:7860/)",
 	"Enter URL (e.g. http://127.0.0.1:7860/)": "Geben Sie die URL ein (z. B. http://127.0.0.1:7860/)",
@@ -455,16 +463,16 @@
 	"Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "",
 	"Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "",
 	"Exclude": "Ausschließen",
 	"Exclude": "Ausschließen",
 	"Execute code for analysis": "Code für Analyse ausführen",
 	"Execute code for analysis": "Code für Analyse ausführen",
-	"Expand": "",
+	"Expand": "Aufklappen",
 	"Experimental": "Experimentell",
 	"Experimental": "Experimentell",
-	"Explain": "",
-	"Explain this section to me in more detail": "",
+	"Explain": "Erklären",
+	"Explain this section to me in more detail": "Erkläre mir diesen Abschnitt im Detail",
 	"Explore the cosmos": "Erforschen Sie das Universum",
 	"Explore the cosmos": "Erforschen Sie das Universum",
 	"Export": "Exportieren",
 	"Export": "Exportieren",
-	"Export All Archived Chats": "Alle archivierten Unterhaltungen exportieren",
-	"Export All Chats (All Users)": "Alle Unterhaltungen exportieren (alle Benutzer)",
-	"Export chat (.json)": "Unterhaltung exportieren (.json)",
-	"Export Chats": "Unterhaltungen exportieren",
+	"Export All Archived Chats": "Alle archivierten Chats exportieren",
+	"Export All Chats (All Users)": "Alle Chats exportieren (alle Benutzer)",
+	"Export chat (.json)": "Chat exportieren (.json)",
+	"Export Chats": "Chats exportieren",
 	"Export Config to JSON File": "Exportiere Konfiguration als JSON-Datei",
 	"Export Config to JSON File": "Exportiere Konfiguration als JSON-Datei",
 	"Export Functions": "Funktionen exportieren",
 	"Export Functions": "Funktionen exportieren",
 	"Export Models": "Modelle exportieren",
 	"Export Models": "Modelle exportieren",
@@ -472,6 +480,7 @@
 	"Export Prompts": "Prompts exportieren",
 	"Export Prompts": "Prompts exportieren",
 	"Export to CSV": "Als CSV exportieren",
 	"Export to CSV": "Als CSV exportieren",
 	"Export Tools": "Werkzeuge exportieren",
 	"Export Tools": "Werkzeuge exportieren",
+	"External": "Extern",
 	"External Models": "Externe Modelle",
 	"External Models": "Externe Modelle",
 	"Failed to add file.": "Fehler beim Hinzufügen der Datei.",
 	"Failed to add file.": "Fehler beim Hinzufügen der Datei.",
 	"Failed to create API Key.": "Fehler beim Erstellen des API-Schlüssels.",
 	"Failed to create API Key.": "Fehler beim Erstellen des API-Schlüssels.",
@@ -510,7 +519,7 @@
 	"Form": "Formular",
 	"Form": "Formular",
 	"Format your variables using brackets like this:": "Formatieren Sie Ihre Variablen mit Klammern, wie hier:",
 	"Format your variables using brackets like this:": "Formatieren Sie Ihre Variablen mit Klammern, wie hier:",
 	"Frequency Penalty": "Frequenzstrafe",
 	"Frequency Penalty": "Frequenzstrafe",
-	"Full Context Mode": "",
+	"Full Context Mode": "Voll-Kontext Modus",
 	"Function": "Funktion",
 	"Function": "Funktion",
 	"Function Calling": "Funktionsaufruf",
 	"Function Calling": "Funktionsaufruf",
 	"Function created successfully": "Funktion erfolgreich erstellt",
 	"Function created successfully": "Funktion erfolgreich erstellt",
@@ -547,7 +556,7 @@
 	"Group updated successfully": "Gruppe erfolgreich aktualisiert",
 	"Group updated successfully": "Gruppe erfolgreich aktualisiert",
 	"Groups": "Gruppen",
 	"Groups": "Gruppen",
 	"Haptic Feedback": "Haptisches Feedback",
 	"Haptic Feedback": "Haptisches Feedback",
-	"has no conversations.": "hat keine Unterhaltungen.",
+	"has no conversations.": "hat keine Chats.",
 	"Hello, {{name}}": "Hallo, {{name}}",
 	"Hello, {{name}}": "Hallo, {{name}}",
 	"Help": "Hilfe",
 	"Help": "Hilfe",
 	"Help us create the best community leaderboard by sharing your feedback history!": "Helfen Sie uns, die beste Community-Bestenliste zu erstellen, indem Sie Ihren Feedback-Verlauf teilen!",
 	"Help us create the best community leaderboard by sharing your feedback history!": "Helfen Sie uns, die beste Community-Bestenliste zu erstellen, indem Sie Ihren Feedback-Verlauf teilen!",
@@ -572,7 +581,7 @@
 	"Image Prompt Generation Prompt": "Prompt für die Bild-Prompt-Generierung",
 	"Image Prompt Generation Prompt": "Prompt für die Bild-Prompt-Generierung",
 	"Image Settings": "Bildeinstellungen",
 	"Image Settings": "Bildeinstellungen",
 	"Images": "Bilder",
 	"Images": "Bilder",
-	"Import Chats": "Unterhaltungen importieren",
+	"Import Chats": "Chats importieren",
 	"Import Config from JSON File": "Konfiguration aus JSON-Datei importieren",
 	"Import Config from JSON File": "Konfiguration aus JSON-Datei importieren",
 	"Import Functions": "Funktionen importieren",
 	"Import Functions": "Funktionen importieren",
 	"Import Models": "Modelle importieren",
 	"Import Models": "Modelle importieren",
@@ -584,11 +593,12 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api` hinzu",
 	"Include `--api` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api` hinzu",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Eingabebefehle",
 	"Input commands": "Eingabebefehle",
 	"Install from Github URL": "Installiere von der Github-URL",
 	"Install from Github URL": "Installiere von der Github-URL",
 	"Instant Auto-Send After Voice Transcription": "Spracherkennung direkt absenden",
 	"Instant Auto-Send After Voice Transcription": "Spracherkennung direkt absenden",
 	"Integration": "",
 	"Integration": "",
-	"Interface": "Benutzeroberfläche",
+	"Interface": "Oberfläche",
 	"Invalid file format.": "Ungültiges Dateiformat.",
 	"Invalid file format.": "Ungültiges Dateiformat.",
 	"Invalid Tag": "Ungültiger Tag",
 	"Invalid Tag": "Ungültiger Tag",
 	"is typing...": "schreibt ...",
 	"is typing...": "schreibt ...",
@@ -667,7 +677,7 @@
 	"Memory updated successfully": "Erinnerung erfolgreich aktualisiert",
 	"Memory updated successfully": "Erinnerung erfolgreich aktualisiert",
 	"Merge Responses": "Antworten zusammenführen",
 	"Merge Responses": "Antworten zusammenführen",
 	"Message rating should be enabled to use this feature": "Antwortbewertung muss aktiviert sein, um diese Funktion zu verwenden",
 	"Message rating should be enabled to use this feature": "Antwortbewertung muss aktiviert sein, um diese Funktion zu verwenden",
-	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Nachrichten, die Sie nach der Erstellung Ihres Links senden, werden nicht geteilt. Nutzer mit der URL können die freigegebene Unterhaltung einsehen.",
+	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Nachrichten, die Sie nach der Erstellung Ihres Links senden, werden nicht geteilt. Nutzer mit der URL können den freigegebenen Chat einsehen.",
 	"Min P": "Min P",
 	"Min P": "Min P",
 	"Minimum Score": "Mindestpunktzahl",
 	"Minimum Score": "Mindestpunktzahl",
 	"Mirostat": "Mirostat",
 	"Mirostat": "Mirostat",
@@ -679,7 +689,7 @@
 	"Model {{modelId}} not found": "Modell {{modelId}} nicht gefunden",
 	"Model {{modelId}} not found": "Modell {{modelId}} nicht gefunden",
 	"Model {{modelName}} is not vision capable": "Das Modell {{modelName}} ist nicht für die Bildverarbeitung geeignet",
 	"Model {{modelName}} is not vision capable": "Das Modell {{modelName}} ist nicht für die Bildverarbeitung geeignet",
 	"Model {{name}} is now {{status}}": "Modell {{name}} ist jetzt {{status}}",
 	"Model {{name}} is now {{status}}": "Modell {{name}} ist jetzt {{status}}",
-	"Model accepts image inputs": "Modell akzeptiert Bileingaben",
+	"Model accepts image inputs": "Modell akzeptiert Bildeingaben",
 	"Model created successfully!": "Modell erfolgreich erstellt!",
 	"Model created successfully!": "Modell erfolgreich erstellt!",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modell-Dateisystempfad erkannt. Modellkurzname ist für das Update erforderlich, Fortsetzung nicht möglich.",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modell-Dateisystempfad erkannt. Modellkurzname ist für das Update erforderlich, Fortsetzung nicht möglich.",
 	"Model Filtering": "Modellfilterung",
 	"Model Filtering": "Modellfilterung",
@@ -700,7 +710,7 @@
 	"Name": "Name",
 	"Name": "Name",
 	"Name your knowledge base": "Benennen Sie Ihren Wissensspeicher",
 	"Name your knowledge base": "Benennen Sie Ihren Wissensspeicher",
 	"Native": "Nativ",
 	"Native": "Nativ",
-	"New Chat": "Neue Unterhaltung",
+	"New Chat": "Neuer Chat",
 	"New Folder": "Neuer Ordner",
 	"New Folder": "Neuer Ordner",
 	"New Password": "Neues Passwort",
 	"New Password": "Neues Passwort",
 	"new-channel": "neuer-kanal",
 	"new-channel": "neuer-kanal",
@@ -807,6 +817,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Vorherige 30 Tage",
 	"Previous 30 days": "Vorherige 30 Tage",
 	"Previous 7 days": "Vorherige 7 Tage",
 	"Previous 7 days": "Vorherige 7 Tage",
+	"Private": "Privat",
 	"Profile Image": "Profilbild",
 	"Profile Image": "Profilbild",
 	"Prompt": "Prompt",
 	"Prompt": "Prompt",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (z. B. \"Erzähle mir eine interessante Tatsache über das Römische Reich\")",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (z. B. \"Erzähle mir eine interessante Tatsache über das Römische Reich\")",
@@ -816,6 +827,7 @@
 	"Prompt updated successfully": "Prompt erfolgreich aktualisiert",
 	"Prompt updated successfully": "Prompt erfolgreich aktualisiert",
 	"Prompts": "Prompts",
 	"Prompts": "Prompts",
 	"Prompts Access": "Prompt-Zugriff",
 	"Prompts Access": "Prompt-Zugriff",
+	"Public": "Öffentlich",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" von Ollama.com beziehen",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" von Ollama.com beziehen",
 	"Pull a model from Ollama.com": "Modell von Ollama.com beziehen",
 	"Pull a model from Ollama.com": "Modell von Ollama.com beziehen",
 	"Query Generation Prompt": "Abfragegenerierungsprompt",
 	"Query Generation Prompt": "Abfragegenerierungsprompt",
@@ -855,7 +867,7 @@
 	"Result": "Ergebnis",
 	"Result": "Ergebnis",
 	"Retrieval": "",
 	"Retrieval": "",
 	"Retrieval Query Generation": "Abfragegenerierung",
 	"Retrieval Query Generation": "Abfragegenerierung",
-	"Rich Text Input for Chat": "Rich-Text-Eingabe für Unterhaltungen",
+	"Rich Text Input for Chat": "Rich-Text-Eingabe für Chats",
 	"RK": "RK",
 	"RK": "RK",
 	"Role": "Rolle",
 	"Role": "Rolle",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine": "Rosé Pine",
@@ -869,12 +881,12 @@
 	"Save As Copy": "Als Kopie speichern",
 	"Save As Copy": "Als Kopie speichern",
 	"Save Tag": "Tag speichern",
 	"Save Tag": "Tag speichern",
 	"Saved": "Gespeichert",
 	"Saved": "Gespeichert",
-	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Das direkte Speichern von Unterhaltungen im Browser-Speicher wird nicht mehr unterstützt. Bitte nehmen Sie einen Moment Zeit, um Ihre Unterhaltungen zu exportieren und zu löschen, indem Sie auf die Schaltfläche unten klicken. Keine Sorge, Sie können Ihre Unterhaltungen problemlos über das Backend wieder importieren.",
+	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Das direkte Speichern von Chats im Browser-Speicher wird nicht mehr unterstützt. Bitte nehmen Sie einen Moment Zeit, um Ihre Chats zu exportieren und zu löschen, indem Sie auf die Schaltfläche unten klicken. Keine Sorge, Sie können Ihre Chats problemlos über das Backend wieder importieren.",
 	"Scroll to bottom when switching between branches": "Beim Wechsel zwischen Branches nach unten scrollen",
 	"Scroll to bottom when switching between branches": "Beim Wechsel zwischen Branches nach unten scrollen",
 	"Search": "Suchen",
 	"Search": "Suchen",
 	"Search a model": "Modell suchen",
 	"Search a model": "Modell suchen",
 	"Search Base": "Suchbasis",
 	"Search Base": "Suchbasis",
-	"Search Chats": "Unterhaltungen durchsuchen...",
+	"Search Chats": "Chats durchsuchen...",
 	"Search Collection": "Sammlung durchsuchen",
 	"Search Collection": "Sammlung durchsuchen",
 	"Search Filters": "Suchfilter",
 	"Search Filters": "Suchfilter",
 	"search for tags": "nach Tags suchen",
 	"search for tags": "nach Tags suchen",
@@ -945,7 +957,7 @@
 	"Settings": "Einstellungen",
 	"Settings": "Einstellungen",
 	"Settings saved successfully!": "Einstellungen erfolgreich gespeichert!",
 	"Settings saved successfully!": "Einstellungen erfolgreich gespeichert!",
 	"Share": "Teilen",
 	"Share": "Teilen",
-	"Share Chat": "Unterhaltung teilen",
+	"Share Chat": "Chat teilen",
 	"Share to Open WebUI Community": "Mit OpenWebUI Community teilen",
 	"Share to Open WebUI Community": "Mit OpenWebUI Community teilen",
 	"Show": "Anzeigen",
 	"Show": "Anzeigen",
 	"Show \"What's New\" modal on login": "\"Was gibt's Neues\"-Modal beim Anmelden anzeigen",
 	"Show \"What's New\" modal on login": "\"Was gibt's Neues\"-Modal beim Anmelden anzeigen",
@@ -967,7 +979,7 @@
 	"Speech-to-Text Engine": "Sprache-zu-Text-Engine",
 	"Speech-to-Text Engine": "Sprache-zu-Text-Engine",
 	"Stop": "Stop",
 	"Stop": "Stop",
 	"Stop Sequence": "Stop-Sequenz",
 	"Stop Sequence": "Stop-Sequenz",
-	"Stream Chat Response": "Unterhaltungsantwort streamen",
+	"Stream Chat Response": "Chat-Antwort streamen",
 	"STT Model": "STT-Modell",
 	"STT Model": "STT-Modell",
 	"STT Settings": "STT-Einstellungen",
 	"STT Settings": "STT-Einstellungen",
 	"Subtitle (e.g. about the Roman Empire)": "Untertitel (z. B. über das Römische Reich)",
 	"Subtitle (e.g. about the Roman Empire)": "Untertitel (z. B. über das Römische Reich)",
@@ -980,6 +992,7 @@
 	"System": "System",
 	"System": "System",
 	"System Instructions": "Systemanweisungen",
 	"System Instructions": "Systemanweisungen",
 	"System Prompt": "System-Prompt",
 	"System Prompt": "System-Prompt",
+	"Tags": "",
 	"Tags Generation": "Tag-Generierung",
 	"Tags Generation": "Tag-Generierung",
 	"Tags Generation Prompt": "Prompt für Tag-Generierung",
 	"Tags Generation Prompt": "Prompt für Tag-Generierung",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. (Standard: 1)",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. (Standard: 1)",
@@ -990,7 +1003,7 @@
 	"Tell us more:": "Erzähl uns mehr",
 	"Tell us more:": "Erzähl uns mehr",
 	"Temperature": "Temperatur",
 	"Temperature": "Temperatur",
 	"Template": "Vorlage",
 	"Template": "Vorlage",
-	"Temporary Chat": "Temporäre Unterhaltung",
+	"Temporary Chat": "Temporärer Chat",
 	"Text Splitter": "Text-Splitter",
 	"Text Splitter": "Text-Splitter",
 	"Text-to-Speech Engine": "Text-zu-Sprache-Engine",
 	"Text-to-Speech Engine": "Text-zu-Sprache-Engine",
 	"Tfs Z": "Tfs Z",
 	"Tfs Z": "Tfs Z",
@@ -1004,13 +1017,14 @@
 	"The LDAP attribute that maps to the username that users use to sign in.": "Das LDAP-Attribut, das dem Benutzernamen zugeordnet ist, den Benutzer zum Anmelden verwenden.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Das LDAP-Attribut, das dem Benutzernamen zugeordnet ist, den Benutzer zum Anmelden verwenden.",
 	"The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Die Bestenliste befindet sich derzeit in der Beta-Phase, und es ist möglich, dass wir die Bewertungsberechnungen anpassen, während wir den Algorithmus verfeinern.",
 	"The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Die Bestenliste befindet sich derzeit in der Beta-Phase, und es ist möglich, dass wir die Bewertungsberechnungen anpassen, während wir den Algorithmus verfeinern.",
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Die maximale Dateigröße in MB. Wenn die Dateigröße dieses Limit überschreitet, wird die Datei nicht hochgeladen.",
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Die maximale Dateigröße in MB. Wenn die Dateigröße dieses Limit überschreitet, wird die Datei nicht hochgeladen.",
-	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Die maximale Anzahl von Dateien, die gleichzeitig in der Unterhaltung verwendet werden können. Wenn die Anzahl der Dateien dieses Limit überschreitet, werden die Dateien nicht hochgeladen.",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Die maximale Anzahl von Dateien, die gleichzeitig im Chat verwendet werden können. Wenn die Anzahl der Dateien dieses Limit überschreitet, werden die Dateien nicht hochgeladen.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Die Punktzahl sollte ein Wert zwischen 0,0 (0 %) und 1,0 (100 %) sein.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Die Punktzahl sollte ein Wert zwischen 0,0 (0 %) und 1,0 (100 %) sein.",
 	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Design",
 	"Theme": "Design",
 	"Thinking...": "Denke nach...",
 	"Thinking...": "Denke nach...",
 	"This action cannot be undone. Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?",
 	"This action cannot be undone. Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?",
-	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Unterhaltungen sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "Dieser Kanal wurde am {{createdAt}} erstellt. Dies ist der Beginn des {{channelName}} Kanals.",
+	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Chats sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
@@ -1027,10 +1041,10 @@
 	"Tika": "Tika",
 	"Tika": "Tika",
 	"Tika Server URL required.": "Tika-Server-URL erforderlich.",
 	"Tika Server URL required.": "Tika-Server-URL erforderlich.",
 	"Tiktoken": "Tiktoken",
 	"Tiktoken": "Tiktoken",
-	"Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Tipp: Aktualisieren Sie mehrere Variablenfelder nacheinander, indem Sie nach jedem Ersetzen die Tabulatortaste im Eingabefeld der Unterhaltung drücken.",
+	"Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Tipp: Aktualisieren Sie mehrere Variablenfelder nacheinander, indem Sie nach jedem Ersetzen die Tabulatortaste im Eingabefeld des Chats drücken.",
 	"Title": "Titel",
 	"Title": "Titel",
 	"Title (e.g. Tell me a fun fact)": "Titel (z. B. Erzähl mir einen lustigen Fakt)",
 	"Title (e.g. Tell me a fun fact)": "Titel (z. B. Erzähl mir einen lustigen Fakt)",
-	"Title Auto-Generation": "Unterhaltungstitel automatisch generieren",
+	"Title Auto-Generation": "Chat-Titel automatisch generieren",
 	"Title cannot be an empty string.": "Titel darf nicht leer sein.",
 	"Title cannot be an empty string.": "Titel darf nicht leer sein.",
 	"Title Generation": "Titelgenerierung",
 	"Title Generation": "Titelgenerierung",
 	"Title Generation Prompt": "Prompt für Titelgenerierung",
 	"Title Generation Prompt": "Prompt für Titelgenerierung",
@@ -1040,7 +1054,7 @@
 	"To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Um auf das WebUI zugreifen zu können, wenden Sie sich bitte an einen Administrator. Administratoren können den Benutzerstatus über das Admin-Panel verwalten.",
 	"To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Um auf das WebUI zugreifen zu können, wenden Sie sich bitte an einen Administrator. Administratoren können den Benutzerstatus über das Admin-Panel verwalten.",
 	"To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Um Wissensdatenbanken hier anzuhängen, fügen Sie sie zunächst dem Arbeitsbereich \"Wissen\" hinzu.",
 	"To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Um Wissensdatenbanken hier anzuhängen, fügen Sie sie zunächst dem Arbeitsbereich \"Wissen\" hinzu.",
 	"To learn more about available endpoints, visit our documentation.": "Um mehr über verfügbare Endpunkte zu erfahren, besuchen Sie unsere Dokumentation.",
 	"To learn more about available endpoints, visit our documentation.": "Um mehr über verfügbare Endpunkte zu erfahren, besuchen Sie unsere Dokumentation.",
-	"To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Um Ihre Privatsphäre zu schützen, werden nur Bewertungen, Modell-IDs, Tags und Metadaten aus Ihrem Feedback geteilt – Ihre Unterhaltungen bleiben privat und werden nicht einbezogen.",
+	"To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Um Ihre Privatsphäre zu schützen, werden nur Bewertungen, Modell-IDs, Tags und Metadaten aus Ihrem Feedback geteilt – Ihre Chats bleiben privat und werden nicht einbezogen.",
 	"To select actions here, add them to the \"Functions\" workspace first.": "Um Aktionen auszuwählen, fügen Sie diese zunächst dem Arbeitsbereich „Funktionen“ hinzu.",
 	"To select actions here, add them to the \"Functions\" workspace first.": "Um Aktionen auszuwählen, fügen Sie diese zunächst dem Arbeitsbereich „Funktionen“ hinzu.",
 	"To select filters here, add them to the \"Functions\" workspace first.": "Um Filter auszuwählen, fügen Sie diese zunächst dem Arbeitsbereich „Funktionen“ hinzu.",
 	"To select filters here, add them to the \"Functions\" workspace first.": "Um Filter auszuwählen, fügen Sie diese zunächst dem Arbeitsbereich „Funktionen“ hinzu.",
 	"To select toolkits here, add them to the \"Tools\" workspace first.": "Um Toolkits auszuwählen, fügen Sie sie zunächst dem Arbeitsbereich „Werkzeuge“ hinzu.",
 	"To select toolkits here, add them to the \"Tools\" workspace first.": "Um Toolkits auszuwählen, fügen Sie sie zunächst dem Arbeitsbereich „Werkzeuge“ hinzu.",
@@ -1068,7 +1082,7 @@
 	"Top P": "Top P",
 	"Top P": "Top P",
 	"Transformers": "Transformers",
 	"Transformers": "Transformers",
 	"Trouble accessing Ollama?": "Probleme beim Zugriff auf Ollama?",
 	"Trouble accessing Ollama?": "Probleme beim Zugriff auf Ollama?",
-	"Trust Proxy Environment": "",
+	"Trust Proxy Environment": "Proxy-Umgebung vertrauen",
 	"TTS Model": "TTS-Modell",
 	"TTS Model": "TTS-Modell",
 	"TTS Settings": "TTS-Einstellungen",
 	"TTS Settings": "TTS-Einstellungen",
 	"TTS Voice": "TTS-Stimme",
 	"TTS Voice": "TTS-Stimme",
@@ -1077,8 +1091,8 @@
 	"Uh-oh! There was an issue with the response.": "Oh nein! Es gab ein Problem mit der Antwort.",
 	"Uh-oh! There was an issue with the response.": "Oh nein! Es gab ein Problem mit der Antwort.",
 	"UI": "Oberfläche",
 	"UI": "Oberfläche",
 	"Unarchive All": "Alle wiederherstellen",
 	"Unarchive All": "Alle wiederherstellen",
-	"Unarchive All Archived Chats": "Alle archivierten Unterhaltungen wiederherstellen",
-	"Unarchive Chat": "Unterhaltung wiederherstellen",
+	"Unarchive All Archived Chats": "Alle archivierten Chats wiederherstellen",
+	"Unarchive Chat": "Chat wiederherstellen",
 	"Unlock mysteries": "Geheimnisse entsperren",
 	"Unlock mysteries": "Geheimnisse entsperren",
 	"Unpin": "Lösen",
 	"Unpin": "Lösen",
 	"Unravel secrets": "Geheimnisse lüften",
 	"Unravel secrets": "Geheimnisse lüften",
@@ -1090,7 +1104,7 @@
 	"Updated": "Aktualisiert",
 	"Updated": "Aktualisiert",
 	"Updated at": "Aktualisiert am",
 	"Updated at": "Aktualisiert am",
 	"Updated At": "Aktualisiert am",
 	"Updated At": "Aktualisiert am",
-	"Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "",
+	"Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Upgrade auf einen lizenzierten Plan für erweiterte Funktionen wie individuelles Design, Branding und dedizierten Support.",
 	"Upload": "Hochladen",
 	"Upload": "Hochladen",
 	"Upload a GGUF model": "GGUF-Model hochladen",
 	"Upload a GGUF model": "GGUF-Model hochladen",
 	"Upload directory": "Upload-Verzeichnis",
 	"Upload directory": "Upload-Verzeichnis",
@@ -1119,6 +1133,7 @@
 	"Valves updated successfully": "Valves erfolgreich aktualisiert",
 	"Valves updated successfully": "Valves erfolgreich aktualisiert",
 	"variable": "Variable",
 	"variable": "Variable",
 	"variable to have them replaced with clipboard content.": "Variable, um den Inhalt der Zwischenablage beim Nutzen des Prompts zu ersetzen.",
 	"variable to have them replaced with clipboard content.": "Variable, um den Inhalt der Zwischenablage beim Nutzen des Prompts zu ersetzen.",
+	"Verify Connection": "Verbindung verifizieren",
 	"Version": "Version",
 	"Version": "Version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} von {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} von {{totalVersions}}",
 	"View Replies": "Antworten anzeigen",
 	"View Replies": "Antworten anzeigen",
@@ -1164,11 +1179,10 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Sie können nur mit maximal {{maxCount}} Datei(en) gleichzeitig chatten.",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Sie können nur mit maximal {{maxCount}} Datei(en) gleichzeitig chatten.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Personalisieren Sie Interaktionen mit LLMs, indem Sie über die Schaltfläche \"Verwalten\" Erinnerungen hinzufügen.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Personalisieren Sie Interaktionen mit LLMs, indem Sie über die Schaltfläche \"Verwalten\" Erinnerungen hinzufügen.",
 	"You cannot upload an empty file.": "Sie können keine leere Datei hochladen.",
 	"You cannot upload an empty file.": "Sie können keine leere Datei hochladen.",
-	"You do not have permission to access this feature.": "Sie haben keine Berechtigung, auf diese Funktion zuzugreifen.",
 	"You do not have permission to upload files": "Sie haben keine Berechtigung, Dateien hochzuladen",
 	"You do not have permission to upload files": "Sie haben keine Berechtigung, Dateien hochzuladen",
 	"You do not have permission to upload files.": "Sie haben keine Berechtigung zum Hochladen von Dateien.",
 	"You do not have permission to upload files.": "Sie haben keine Berechtigung zum Hochladen von Dateien.",
-	"You have no archived conversations.": "Du hast keine archivierten Unterhaltungen.",
-	"You have shared this chat": "Sie haben diese Unterhaltung geteilt",
+	"You have no archived conversations.": "Du hast keine archivierten Chats.",
+	"You have shared this chat": "Sie haben diesen Chat geteilt",
 	"You're a helpful assistant.": "Du bist ein hilfreicher Assistent.",
 	"You're a helpful assistant.": "Du bist ein hilfreicher Assistent.",
 	"You're now logged in.": "Sie sind jetzt eingeloggt.",
 	"You're now logged in.": "Sie sind jetzt eingeloggt.",
 	"Your account status is currently pending activation.": "Ihr Kontostatus ist derzeit ausstehend und wartet auf Aktivierung.",
 	"Your account status is currently pending activation.": "Ihr Kontostatus ist derzeit ausstehend und wartet auf Aktivierung.",

+ 15 - 1
src/lib/i18n/locales/dg-DG/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api`)": "(such e.g. `sh webui.sh --api`)",
 	"(e.g. `sh webui.sh --api`)": "(such e.g. `sh webui.sh --api`)",
 	"(latest)": "(much latest)",
 	"(latest)": "(much latest)",
+	"(Ollama)": "",
 	"{{ models }}": "",
 	"{{ models }}": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "Such account exists?",
 	"Already have an account?": "Such account exists?",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "",
 	"Amazing": "",
 	"an assistant": "such assistant",
 	"an assistant": "such assistant",
 	"Analyzed": "",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "Default Prompt Suggestions",
 	"Default Prompt Suggestions": "Default Prompt Suggestions",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Default User Role",
 	"Default User Role": "Default User Role",
 	"Delete": "",
 	"Delete": "",
 	"Delete a model": "Delete a model",
 	"Delete a model": "Delete a model",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "",
 	"Describe your knowledge base and objectives": "",
 	"Description": "Description",
 	"Description": "Description",
 	"Didn't fully follow instructions": "",
 	"Didn't fully follow instructions": "",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "",
 	"Dive into knowledge": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "Document",
 	"Document": "Document",
 	"Document Intelligence": "",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Enter Size of Chunk",
 	"Enter Chunk Size": "Enter Size of Chunk",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "",
 	"Enter description": "",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -471,6 +479,7 @@
 	"Export Prompts": "Export Promptos",
 	"Export Prompts": "Export Promptos",
 	"Export to CSV": "",
 	"Export to CSV": "",
 	"Export Tools": "",
 	"Export Tools": "",
+	"External": "",
 	"External Models": "",
 	"External Models": "",
 	"Failed to add file.": "",
 	"Failed to add file.": "",
 	"Failed to create API Key.": "",
 	"Failed to create API Key.": "",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Input commands",
 	"Input commands": "Input commands",
 	"Install from Github URL": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Promptos",
 	"Prompts": "Promptos",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "Pull a wowdel from Ollama.com",
 	"Pull a model from Ollama.com": "Pull a wowdel from Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -979,6 +991,7 @@
 	"System": "System very system",
 	"System": "System very system",
 	"System Instructions": "",
 	"System Instructions": "",
 	"System Prompt": "System Prompt much prompt",
 	"System Prompt": "System Prompt much prompt",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "Theme much theme",
 	"Theme": "Theme much theme",
 	"Thinking...": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "variable very variable",
 	"variable": "variable very variable",
 	"variable to have them replaced with clipboard content.": "variable to have them replaced with clipboard content. Very replace.",
 	"variable to have them replaced with clipboard content.": "variable to have them replaced with clipboard content. Very replace.",
+	"Verify Connection": "",
 	"Version": "Version much version",
 	"Version": "Version much version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot upload an empty file.": "",
 	"You cannot upload an empty file.": "",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "",
 	"You do not have permission to upload files.": "",
 	"You have no archived conversations.": "",
 	"You have no archived conversations.": "",

+ 15 - 1
src/lib/i18n/locales/el-GR/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(π.χ. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(π.χ. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api`)": "(π.χ. `sh webui.sh --api`)",
 	"(e.g. `sh webui.sh --api`)": "(π.χ. `sh webui.sh --api`)",
 	"(latest)": "(τελευταίο)",
 	"(latest)": "(τελευταίο)",
+	"(Ollama)": "",
 	"{{ models }}": "{{ models }}",
 	"{{ models }}": "{{ models }}",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "Έχετε ήδη λογαριασμό;",
 	"Already have an account?": "Έχετε ήδη λογαριασμό;",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "Καταπληκτικό",
 	"Amazing": "Καταπληκτικό",
 	"an assistant": "ένας βοηθός",
 	"an assistant": "ένας βοηθός",
 	"Analyzed": "",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "Προεπιλεγμένες Προτάσεις Προτροπής",
 	"Default Prompt Suggestions": "Προεπιλεγμένες Προτάσεις Προτροπής",
 	"Default to 389 or 636 if TLS is enabled": "Προεπιλογή στο 389 ή 636 εάν είναι ενεργοποιημένο το TLS",
 	"Default to 389 or 636 if TLS is enabled": "Προεπιλογή στο 389 ή 636 εάν είναι ενεργοποιημένο το TLS",
 	"Default to ALL": "Προεπιλογή σε ΟΛΑ",
 	"Default to ALL": "Προεπιλογή σε ΟΛΑ",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Προεπιλεγμένος Ρόλος Χρήστη",
 	"Default User Role": "Προεπιλεγμένος Ρόλος Χρήστη",
 	"Delete": "Διαγραφή",
 	"Delete": "Διαγραφή",
 	"Delete a model": "Διαγραφή ενός μοντέλου",
 	"Delete a model": "Διαγραφή ενός μοντέλου",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "Περιγράψτε τη βάση γνώσης και τους στόχους σας",
 	"Describe your knowledge base and objectives": "Περιγράψτε τη βάση γνώσης και τους στόχους σας",
 	"Description": "Περιγραφή",
 	"Description": "Περιγραφή",
 	"Didn't fully follow instructions": "Δεν ακολούθησε πλήρως τις οδηγίες",
 	"Didn't fully follow instructions": "Δεν ακολούθησε πλήρως τις οδηγίες",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "Βυθιστείτε στη γνώση",
 	"Dive into knowledge": "Βυθιστείτε στη γνώση",
 	"Do not install functions from sources you do not fully trust.": "Μην εγκαθιστάτε λειτουργίες από πηγές που δεν εμπιστεύεστε πλήρως.",
 	"Do not install functions from sources you do not fully trust.": "Μην εγκαθιστάτε λειτουργίες από πηγές που δεν εμπιστεύεστε πλήρως.",
 	"Do not install tools from sources you do not fully trust.": "Μην εγκαθιστάτε εργαλεία από πηγές που δεν εμπιστεύεστε πλήρως.",
 	"Do not install tools from sources you do not fully trust.": "Μην εγκαθιστάτε εργαλεία από πηγές που δεν εμπιστεύεστε πλήρως.",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "Έγγραφο",
 	"Document": "Έγγραφο",
 	"Document Intelligence": "",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "Εισάγετε το Μέγεθος Τμημάτων",
 	"Enter Chunk Size": "Εισάγετε το Μέγεθος Τμημάτων",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "Εισάγετε την περιγραφή",
 	"Enter description": "Εισάγετε την περιγραφή",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -471,6 +479,7 @@
 	"Export Prompts": "Εξαγωγή Προτροπών",
 	"Export Prompts": "Εξαγωγή Προτροπών",
 	"Export to CSV": "Εξαγωγή σε CSV",
 	"Export to CSV": "Εξαγωγή σε CSV",
 	"Export Tools": "Εξαγωγή Εργαλείων",
 	"Export Tools": "Εξαγωγή Εργαλείων",
+	"External": "",
 	"External Models": "Εξωτερικά Μοντέλα",
 	"External Models": "Εξωτερικά Μοντέλα",
 	"Failed to add file.": "Αποτυχία προσθήκης αρχείου.",
 	"Failed to add file.": "Αποτυχία προσθήκης αρχείου.",
 	"Failed to create API Key.": "Αποτυχία δημιουργίας Κλειδιού API.",
 	"Failed to create API Key.": "Αποτυχία δημιουργίας Κλειδιού API.",
@@ -583,6 +592,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api` όταν τρέχετε το stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api` όταν τρέχετε το stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Πληροφορίες",
 	"Info": "Πληροφορίες",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Εισαγωγή εντολών",
 	"Input commands": "Εισαγωγή εντολών",
 	"Install from Github URL": "Εγκατάσταση από URL Github",
 	"Install from Github URL": "Εγκατάσταση από URL Github",
 	"Instant Auto-Send After Voice Transcription": "Άμεση Αυτόματη Αποστολή μετά τη μεταγραφή φωνής",
 	"Instant Auto-Send After Voice Transcription": "Άμεση Αυτόματη Αποστολή μετά τη μεταγραφή φωνής",
@@ -806,6 +816,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Προηγούμενες 30 ημέρες",
 	"Previous 30 days": "Προηγούμενες 30 ημέρες",
 	"Previous 7 days": "Προηγούμενες 7 ημέρες",
 	"Previous 7 days": "Προηγούμενες 7 ημέρες",
+	"Private": "",
 	"Profile Image": "Εικόνα Προφίλ",
 	"Profile Image": "Εικόνα Προφίλ",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Προτροπή (π.χ. Πες μου ένα διασκεδαστικό γεγονός για την Ρωμαϊκή Αυτοκρατορία)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Προτροπή (π.χ. Πες μου ένα διασκεδαστικό γεγονός για την Ρωμαϊκή Αυτοκρατορία)",
@@ -815,6 +826,7 @@
 	"Prompt updated successfully": "Η προτροπή ενημερώθηκε με επιτυχία",
 	"Prompt updated successfully": "Η προτροπή ενημερώθηκε με επιτυχία",
 	"Prompts": "Προτροπές",
 	"Prompts": "Προτροπές",
 	"Prompts Access": "Πρόσβαση Προτροπών",
 	"Prompts Access": "Πρόσβαση Προτροπών",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Τραβήξτε \"{{searchValue}}\" από το Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Τραβήξτε \"{{searchValue}}\" από το Ollama.com",
 	"Pull a model from Ollama.com": "Τραβήξτε ένα μοντέλο από το Ollama.com",
 	"Pull a model from Ollama.com": "Τραβήξτε ένα μοντέλο από το Ollama.com",
 	"Query Generation Prompt": "Προτροπή Δημιουργίας Ερωτήσεων",
 	"Query Generation Prompt": "Προτροπή Δημιουργίας Ερωτήσεων",
@@ -979,6 +991,7 @@
 	"System": "Σύστημα",
 	"System": "Σύστημα",
 	"System Instructions": "Οδηγίες Συστήματος",
 	"System Instructions": "Οδηγίες Συστήματος",
 	"System Prompt": "Προτροπή Συστήματος",
 	"System Prompt": "Προτροπή Συστήματος",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Προτροπή Γενιάς Ετικετών",
 	"Tags Generation Prompt": "Προτροπή Γενιάς Ετικετών",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1009,6 +1022,7 @@
 	"Theme": "Θέμα",
 	"Theme": "Θέμα",
 	"Thinking...": "Σκέφτομαι...",
 	"Thinking...": "Σκέφτομαι...",
 	"This action cannot be undone. Do you wish to continue?": "Αυτή η ενέργεια δεν μπορεί να αναιρεθεί. Θέλετε να συνεχίσετε;",
 	"This action cannot be undone. Do you wish to continue?": "Αυτή η ενέργεια δεν μπορεί να αναιρεθεί. Θέλετε να συνεχίσετε;",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1132,7 @@
 	"Valves updated successfully": "Οι βαλβίδες ενημερώθηκαν με επιτυχία",
 	"Valves updated successfully": "Οι βαλβίδες ενημερώθηκαν με επιτυχία",
 	"variable": "μεταβλητή",
 	"variable": "μεταβλητή",
 	"variable to have them replaced with clipboard content.": "μεταβλητή να αντικατασταθούν με το περιεχόμενο του πρόχειρου.",
 	"variable to have them replaced with clipboard content.": "μεταβλητή να αντικατασταθούν με το περιεχόμενο του πρόχειρου.",
+	"Verify Connection": "",
 	"Version": "Έκδοση",
 	"Version": "Έκδοση",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Έκδοση {{selectedVersion}} από {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Έκδοση {{selectedVersion}} από {{totalVersions}}",
 	"View Replies": "",
 	"View Replies": "",
@@ -1163,7 +1178,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Μπορείτε να συνομιλήσετε μόνο με μέγιστο αριθμό {{maxCount}} αρχείου(-ων) ταυτόχρονα.",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Μπορείτε να συνομιλήσετε μόνο με μέγιστο αριθμό {{maxCount}} αρχείου(-ων) ταυτόχρονα.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Μπορείτε να προσωποποιήσετε τις αλληλεπιδράσεις σας με τα LLMs προσθέτοντας αναμνήσεις μέσω του κουμπιού 'Διαχείριση' παρακάτω, κάνοντάς τα πιο χρήσιμα και προσαρμοσμένα σε εσάς.",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Μπορείτε να προσωποποιήσετε τις αλληλεπιδράσεις σας με τα LLMs προσθέτοντας αναμνήσεις μέσω του κουμπιού 'Διαχείριση' παρακάτω, κάνοντάς τα πιο χρήσιμα και προσαρμοσμένα σε εσάς.",
 	"You cannot upload an empty file.": "Δεν μπορείτε να ανεβάσετε ένα κενό αρχείο.",
 	"You cannot upload an empty file.": "Δεν μπορείτε να ανεβάσετε ένα κενό αρχείο.",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "Δεν έχετε άδεια να ανεβάσετε αρχεία.",
 	"You do not have permission to upload files.": "Δεν έχετε άδεια να ανεβάσετε αρχεία.",
 	"You have no archived conversations.": "Δεν έχετε αρχειοθετημένες συνομιλίες.",
 	"You have no archived conversations.": "Δεν έχετε αρχειοθετημένες συνομιλίες.",

+ 15 - 1
src/lib/i18n/locales/en-GB/translation.json

@@ -4,6 +4,7 @@
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
 	"(e.g. `sh webui.sh --api`)": "",
 	"(e.g. `sh webui.sh --api`)": "",
 	"(latest)": "",
 	"(latest)": "",
+	"(Ollama)": "",
 	"{{ models }}": "",
 	"{{ models }}": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} hidden lines": "",
 	"{{COUNT}} Replies": "",
 	"{{COUNT}} Replies": "",
@@ -68,6 +69,8 @@
 	"Already have an account?": "",
 	"Already have an account?": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Always": "",
+	"Always Collapse Code Blocks": "",
+	"Always Expand Details": "",
 	"Amazing": "",
 	"Amazing": "",
 	"an assistant": "",
 	"an assistant": "",
 	"Analyzed": "",
 	"Analyzed": "",
@@ -270,6 +273,7 @@
 	"Default Prompt Suggestions": "",
 	"Default Prompt Suggestions": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "",
 	"Default User Role": "",
 	"Delete": "",
 	"Delete": "",
 	"Delete a model": "",
 	"Delete a model": "",
@@ -292,6 +296,7 @@
 	"Describe your knowledge base and objectives": "",
 	"Describe your knowledge base and objectives": "",
 	"Description": "",
 	"Description": "",
 	"Didn't fully follow instructions": "",
 	"Didn't fully follow instructions": "",
+	"Direct": "",
 	"Direct Connections": "",
 	"Direct Connections": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
 	"Direct Connections settings updated": "",
 	"Direct Connections settings updated": "",
@@ -314,6 +319,8 @@
 	"Dive into knowledge": "",
 	"Dive into knowledge": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
+	"Docling": "",
+	"Docling Server URL required.": "",
 	"Document": "",
 	"Document": "",
 	"Document Intelligence": "",
 	"Document Intelligence": "",
 	"Document Intelligence endpoint and key required.": "",
 	"Document Intelligence endpoint and key required.": "",
@@ -384,6 +391,7 @@
 	"Enter Chunk Size": "",
 	"Enter Chunk Size": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "",
 	"Enter description": "",
 	"Enter description": "",
+	"Enter Docling Server URL": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Endpoint": "",
 	"Enter Document Intelligence Key": "",
 	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
@@ -472,6 +480,7 @@
 	"Export Prompts": "",
 	"Export Prompts": "",
 	"Export to CSV": "",
 	"Export to CSV": "",
 	"Export Tools": "",
 	"Export Tools": "",
+	"External": "",
 	"External Models": "",
 	"External Models": "",
 	"Failed to add file.": "",
 	"Failed to add file.": "",
 	"Failed to create API Key.": "",
 	"Failed to create API Key.": "",
@@ -584,6 +593,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "",
 	"Input commands": "",
 	"Install from Github URL": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -807,6 +817,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -816,6 +827,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "",
 	"Prompts": "",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -980,6 +992,7 @@
 	"System": "",
 	"System": "",
 	"System Instructions": "",
 	"System Instructions": "",
 	"System Prompt": "",
 	"System Prompt": "",
+	"Tags": "",
 	"Tags Generation": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
@@ -1010,6 +1023,7 @@
 	"Theme": "",
 	"Theme": "",
 	"Thinking...": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1119,6 +1133,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "",
 	"variable": "",
 	"variable to have them replaced with clipboard content.": "",
 	"variable to have them replaced with clipboard content.": "",
+	"Verify Connection": "",
 	"Version": "",
 	"Version": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",
@@ -1164,7 +1179,6 @@
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot upload an empty file.": "",
 	"You cannot upload an empty file.": "",
-	"You do not have permission to access this feature.": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files": "",
 	"You do not have permission to upload files.": "",
 	"You do not have permission to upload files.": "",
 	"You have no archived conversations.": "",
 	"You have no archived conversations.": "",

Some files were not shown because too many files changed in this diff