
Merge pull request #5565 from open-webui/dev

0.3.23
Timothy Jaeryang Baek committed 9 months ago
commit ff8a2da751

+ 21 - 0
CHANGELOG.md

@@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.3.23] - 2024-09-21
+
+### Added
+
+- **🚀 WebSocket Redis Support**: Enhanced load balancing capabilities for multiple instance setups, promoting better performance and reliability in WebUI.
+- **🔧 Adjustable Chat Controls**: Introduced width-adjustable chat controls, enabling a personalized and more comfortable user interface.
+- **🌎 i18n Updates**: Improved and updated the Chinese translations.
+
+### Fixed
+
+- **🌐 Task Model Unloading Issue**: Modified task handling to use the Ollama /api/chat endpoint instead of OpenAI compatible endpoint, ensuring models stay loaded and ready with custom parameters, thus minimizing delays in task execution.
+- **📝 Title Generation Fix for OpenAI Compatible APIs**: Resolved an issue preventing the generation of titles, enhancing consistency and reliability when using multiple API providers.
+- **🗃️ RAG Duplicate Collection Issue**: Fixed a bug causing repeated processing of the same uploaded file. Now utilizes indexed files to prevent unnecessary duplications, optimizing resource usage.
+- **🖼️ Image Generation Enhancement**: Refactored OpenAI image generation endpoint to be asynchronous, preventing the WebUI from becoming unresponsive during processing, thus enhancing user experience.
+- **🔓 Downgrade Authlib**: Reverted Authlib to version 1.3.1 to address and resolve issues concerning OAuth functionality.
+
+### Changed
+
+- **🔍 Improved Message Interaction**: Enhanced the message node interface to allow for easier focus redirection with a simple click, streamlining user interaction.
+- **✨ Styling Refactor**: Updated WebUI styling for a cleaner, more modern look, enhancing user experience across the platform.
+
 ## [0.3.22] - 2024-09-19
 
 ### Added

+ 8 - 3
backend/open_webui/apps/openai/main.py

@@ -405,14 +405,19 @@ async def generate_chat_completion(
             "role": user.role,
         }
 
+    url = app.state.config.OPENAI_API_BASE_URLS[idx]
+    key = app.state.config.OPENAI_API_KEYS[idx]
+
+    # Change max_completion_tokens to max_tokens (Backward compatible)
+    if "api.openai.com" not in url and not payload["model"].lower().startswith("o1-"):
+        if "max_completion_tokens" in payload:
+            payload["max_tokens"] = payload.pop("max_completion_tokens")
+
     # Convert the modified body back to JSON
     payload = json.dumps(payload)
 
     log.debug(payload)
 
-    url = app.state.config.OPENAI_API_BASE_URLS[idx]
-    key = app.state.config.OPENAI_API_KEYS[idx]
-
     headers = {}
     headers["Authorization"] = f"Bearer {key}"
     headers["Content-Type"] = "application/json"

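Moving the `url` lookup above the serialization step is what makes the new shim possible: it has to inspect the backend URL while `payload` is still a dict. A minimal sketch of the rename, with placeholder model and URL values (not taken from this commit):

```python
# OpenAI's newer API uses max_completion_tokens; most OpenAI-compatible
# backends (and pre-o1 models) still expect max_tokens, so rename it.
payload = {"model": "llama3", "max_completion_tokens": 256}  # placeholder values
url = "http://localhost:8080/v1"  # any non-OpenAI, OpenAI-compatible endpoint

if "api.openai.com" not in url and not payload["model"].lower().startswith("o1-"):
    if "max_completion_tokens" in payload:
        payload["max_tokens"] = payload.pop("max_completion_tokens")

assert payload == {"model": "llama3", "max_tokens": 256}
```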
+ 25 - 25
backend/open_webui/apps/rag/main.py

@@ -1099,35 +1099,35 @@ def store_docs_in_vector_db(
                 log.info(f"deleting existing collection {collection_name}")
                 VECTOR_DB_CLIENT.delete_collection(collection_name=collection_name)
 
-        embedding_function = get_embedding_function(
-            app.state.config.RAG_EMBEDDING_ENGINE,
-            app.state.config.RAG_EMBEDDING_MODEL,
-            app.state.sentence_transformer_ef,
-            app.state.config.OPENAI_API_KEY,
-            app.state.config.OPENAI_API_BASE_URL,
-            app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
-        )
+        if VECTOR_DB_CLIENT.has_collection(collection_name=collection_name):
+            log.info(f"collection {collection_name} already exists")
+            return True
+        else:
+            embedding_function = get_embedding_function(
+                app.state.config.RAG_EMBEDDING_ENGINE,
+                app.state.config.RAG_EMBEDDING_MODEL,
+                app.state.sentence_transformer_ef,
+                app.state.config.OPENAI_API_KEY,
+                app.state.config.OPENAI_API_BASE_URL,
+                app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
+            )
 
-        VECTOR_DB_CLIENT.insert(
-            collection_name=collection_name,
-            items=[
-                {
-                    "id": str(uuid.uuid4()),
-                    "text": text,
-                    "vector": embedding_function(text.replace("\n", " ")),
-                    "metadata": metadatas[idx],
-                }
-                for idx, text in enumerate(texts)
-            ],
-        )
+            VECTOR_DB_CLIENT.insert(
+                collection_name=collection_name,
+                items=[
+                    {
+                        "id": str(uuid.uuid4()),
+                        "text": text,
+                        "vector": embedding_function(text.replace("\n", " ")),
+                        "metadata": metadatas[idx],
+                    }
+                    for idx, text in enumerate(texts)
+                ],
+            )
 
-        return True
-    except Exception as e:
-        if e.__class__.__name__ == "UniqueConstraintError":
             return True
-
+    except Exception as e:
         log.exception(e)
-
         return False
 
 

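The effect of the new `has_collection` guard: re-uploading a file whose collection already exists returns immediately instead of re-embedding and re-inserting every chunk. A self-contained sketch of the control flow, using a hypothetical stand-in for `VECTOR_DB_CLIENT`:

```python
class FakeVectorDB:
    """Hypothetical stand-in for VECTOR_DB_CLIENT, only to show the guard."""

    def __init__(self):
        self._collections = {}

    def has_collection(self, collection_name):
        return collection_name in self._collections

    def insert(self, collection_name, items):
        self._collections[collection_name] = items


db = FakeVectorDB()


def store_docs(collection_name, texts):
    # New early exit: the same uploaded file maps to the same collection
    # name, so repeated uploads skip the expensive embedding step.
    if db.has_collection(collection_name=collection_name):
        return True
    db.insert(
        collection_name=collection_name,
        items=[{"id": str(i), "text": t} for i, t in enumerate(texts)],
    )
    return True


store_docs("file-abc123", ["chunk one", "chunk two"])  # embeds and inserts
store_docs("file-abc123", ["chunk one", "chunk two"])  # short-circuits
```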
+ 30 - 8
backend/open_webui/apps/socket/main.py

@@ -2,16 +2,38 @@ import asyncio
 
 import socketio
 from open_webui.apps.webui.models.users import Users
-from open_webui.env import ENABLE_WEBSOCKET_SUPPORT
+from open_webui.env import (
+    ENABLE_WEBSOCKET_SUPPORT,
+    WEBSOCKET_MANAGER,
+    WEBSOCKET_REDIS_URL,
+)
 from open_webui.utils.utils import decode_token
 
-sio = socketio.AsyncServer(
-    cors_allowed_origins=[],
-    async_mode="asgi",
-    transports=(["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]),
-    allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
-    always_connect=True,
-)
+
+if WEBSOCKET_MANAGER == "redis":
+    mgr = socketio.AsyncRedisManager(WEBSOCKET_REDIS_URL)
+    sio = socketio.AsyncServer(
+        cors_allowed_origins=[],
+        async_mode="asgi",
+        transports=(
+            ["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]
+        ),
+        allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
+        always_connect=True,
+        client_manager=mgr,
+    )
+else:
+    sio = socketio.AsyncServer(
+        cors_allowed_origins=[],
+        async_mode="asgi",
+        transports=(
+            ["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]
+        ),
+        allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
+        always_connect=True,
+    )
+
+
 app = socketio.ASGIApp(sio, socketio_path="/ws/socket.io")
 
 # Dictionary to maintain the user pool

+ 4 - 0
backend/open_webui/env.py

@@ -302,3 +302,7 @@ if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
 ENABLE_WEBSOCKET_SUPPORT = (
     os.environ.get("ENABLE_WEBSOCKET_SUPPORT", "True").lower() == "true"
 )
+
+WEBSOCKET_MANAGER = os.environ.get("WEBSOCKET_MANAGER", "")
+
+WEBSOCKET_REDIS_URL = os.environ.get("WEBSOCKET_REDIS_URL", "redis://localhost:6379/0")

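Together these two settings enable the changelog's WebSocket Redis support: setting `WEBSOCKET_MANAGER` to `redis` makes every WebUI replica share one socket.io message queue, so events reach users regardless of which instance serves them. A hedged sketch of the selection logic from `socket/main.py` above (the Redis URL is a placeholder for your deployment):

```python
import os
import socketio

WEBSOCKET_MANAGER = os.environ.get("WEBSOCKET_MANAGER", "")
WEBSOCKET_REDIS_URL = os.environ.get("WEBSOCKET_REDIS_URL", "redis://localhost:6379/0")

# With WEBSOCKET_MANAGER=redis, all replicas publish and subscribe through
# the same Redis instance; otherwise the default in-memory manager is used.
client_manager = (
    socketio.AsyncRedisManager(WEBSOCKET_REDIS_URL)
    if WEBSOCKET_MANAGER == "redis"
    else None
)
sio = socketio.AsyncServer(async_mode="asgi", client_manager=client_manager)
app = socketio.ASGIApp(sio, socketio_path="/ws/socket.io")
```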
+ 36 - 16
backend/open_webui/main.py

@@ -19,7 +19,9 @@ from open_webui.apps.audio.main import app as audio_app
 from open_webui.apps.images.main import app as images_app
 from open_webui.apps.ollama.main import app as ollama_app
 from open_webui.apps.ollama.main import (
-    generate_openai_chat_completion as generate_ollama_chat_completion,
+    GenerateChatCompletionForm,
+    generate_chat_completion as generate_ollama_chat_completion,
+    generate_openai_chat_completion as generate_ollama_openai_chat_completion,
 )
 from open_webui.apps.ollama.main import get_all_models as get_ollama_models
 from open_webui.apps.openai.main import app as openai_app
@@ -135,6 +137,12 @@ from open_webui.utils.utils import (
 )
 from open_webui.utils.webhook import post_webhook
 
+from open_webui.utils.payload import convert_payload_openai_to_ollama
+from open_webui.utils.response import (
+    convert_response_ollama_to_openai,
+    convert_streaming_response_ollama_to_openai,
+)
+
 if SAFE_MODE:
     print("SAFE MODE ENABLED")
     Functions.deactivate_all_functions()
@@ -1048,7 +1056,18 @@ async def generate_chat_completions(form_data: dict, user=Depends(get_verified_u
     if model.get("pipe"):
         return await generate_function_chat_completion(form_data, user=user)
     if model["owned_by"] == "ollama":
-        return await generate_ollama_chat_completion(form_data, user=user)
+        # Using /ollama/api/chat endpoint
+        form_data = convert_payload_openai_to_ollama(form_data)
+        form_data = GenerateChatCompletionForm(**form_data)
+        response = await generate_ollama_chat_completion(form_data=form_data, user=user)
+        if form_data.stream:
+            response.headers["content-type"] = "text/event-stream"
+            return StreamingResponse(
+                convert_streaming_response_ollama_to_openai(response),
+                headers=dict(response.headers),
+            )
+        else:
+            return convert_response_ollama_to_openai(response)
     else:
         return await generate_openai_chat_completion(form_data, user=user)
 
@@ -1399,9 +1418,10 @@ async def generate_title(form_data: dict, user=Depends(get_verified_user)):
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
     task_model_id = get_task_model_id(model_id)
-
     print(task_model_id)
 
+    model = app.state.MODELS[task_model_id]
+
     if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
     if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
     else:
@@ -1440,9 +1460,9 @@ Prompt: {{prompt:middletruncate:8000}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.TITLE_GENERATION)},
     }
-
     log.debug(payload)
 
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1456,7 +1476,6 @@ Prompt: {{prompt:middletruncate:8000}}"""
                 status_code=status.HTTP_400_BAD_REQUEST,
                 content={"detail": str(e)},
             )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 
@@ -1484,6 +1503,8 @@ async def generate_search_query(form_data: dict, user=Depends(get_verified_user)
     task_model_id = get_task_model_id(model_id)
     print(task_model_id)
 
+    model = app.state.MODELS[task_model_id]
+
     if app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
     else:
@@ -1516,9 +1537,9 @@ Search Query:"""
         ),
         "metadata": {"task": str(TASKS.QUERY_GENERATION)},
     }
+    log.debug(payload)
 
-    print(payload)
-
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1532,7 +1553,6 @@ Search Query:"""
                 status_code=status.HTTP_400_BAD_REQUEST,
                 content={"detail": str(e)},
             )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 
@@ -1555,12 +1575,13 @@ async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
     task_model_id = get_task_model_id(model_id)
     print(task_model_id)
 
+    model = app.state.MODELS[task_model_id]
+
     template = '''
 Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
 
 Message: """{{prompt}}"""
 '''
-
     content = title_generation_template(
         template,
         form_data["prompt"],
@@ -1584,9 +1605,9 @@ Message: """{{prompt}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.EMOJI_GENERATION)},
     }
-
     log.debug(payload)
 
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1600,7 +1621,6 @@ Message: """{{prompt}}"""
                 status_code=status.HTTP_400_BAD_REQUEST,
                 content={"detail": str(e)},
             )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 
@@ -1620,8 +1640,10 @@ async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)
 
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
-    model_id = get_task_model_id(model_id)
-    print(model_id)
+    task_model_id = get_task_model_id(model_id)
+    print(task_model_id)
+
+    model = app.state.MODELS[task_model_id]
 
     template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"
 
@@ -1636,13 +1658,12 @@ Responses from models: {{responses}}"""
     )
 
     payload = {
-        "model": model_id,
+        "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": form_data.get("stream", False),
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.MOA_RESPONSE_GENERATION)},
     }
-
     log.debug(payload)
 
     try:
@@ -1658,7 +1679,6 @@ Responses from models: {{responses}}"""
                 status_code=status.HTTP_400_BAD_REQUEST,
                 content={"detail": str(e)},
             )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 

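The net effect of the new routing: a client always speaks the OpenAI chat format, and for Ollama-owned models the server now translates in both directions around the native `/api/chat` endpoint. A hedged sketch of the request side, reusing the helpers added in this commit (the model name is a placeholder):

```python
from open_webui.utils.payload import convert_payload_openai_to_ollama

openai_request = {
    "model": "llama3",  # placeholder
    "messages": [{"role": "user", "content": "Hello"}],
    "max_tokens": 64,
}
ollama_request = convert_payload_openai_to_ollama(openai_request)
# -> {"model": "llama3", "messages": [...], "stream": False,
#     "options": {"num_predict": 64}}
# The Ollama response is then mapped back with
# convert_response_ollama_to_openai (non-streaming) or re-emitted as
# OpenAI-style SSE via convert_streaming_response_ollama_to_openai.
```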
+ 12 - 4
backend/open_webui/utils/misc.py

@@ -105,17 +105,25 @@ def openai_chat_message_template(model: str):
     }
 
 
-def openai_chat_chunk_message_template(model: str, message: str) -> dict:
+def openai_chat_chunk_message_template(
+    model: str, message: Optional[str] = None
+) -> dict:
     template = openai_chat_message_template(model)
     template["object"] = "chat.completion.chunk"
-    template["choices"][0]["delta"] = {"content": message}
+    if message:
+        template["choices"][0]["delta"] = {"content": message}
+    else:
+        template["choices"][0]["finish_reason"] = "stop"
     return template
 
 
-def openai_chat_completion_message_template(model: str, message: str) -> dict:
+def openai_chat_completion_message_template(
+    model: str, message: Optional[str] = None
+) -> dict:
     template = openai_chat_message_template(model)
     template["object"] = "chat.completion"
-    template["choices"][0]["message"] = {"content": message, "role": "assistant"}
+    if message:
+        template["choices"][0]["message"] = {"content": message, "role": "assistant"}
     template["choices"][0]["finish_reason"] = "stop"
     return template
 

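Making `message` optional is what lets the stream converter emit a proper terminal chunk: with content, the chunk carries a `delta`; without it, the chunk carries only `finish_reason: "stop"`. A quick illustration (the model name is a placeholder):

```python
from open_webui.utils.misc import openai_chat_chunk_message_template

chunk = openai_chat_chunk_message_template("llama3", "Hel")
# chunk["choices"][0]["delta"] == {"content": "Hel"}

last = openai_chat_chunk_message_template("llama3")
# last["choices"][0]["finish_reason"] == "stop"; no delta content is set
```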
+ 46 - 0
backend/open_webui/utils/payload.py

@@ -86,3 +86,49 @@ def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
             form_data[value] = param
 
     return form_data
+
+
+def convert_payload_openai_to_ollama(openai_payload: dict) -> dict:
+    """
+    Converts a payload formatted for OpenAI's API to be compatible with Ollama's API endpoint for chat completions.
+
+    Args:
+        openai_payload (dict): The payload originally designed for OpenAI API usage.
+
+    Returns:
+        dict: A modified payload compatible with the Ollama API.
+    """
+    ollama_payload = {}
+
+    # Mapping basic model and message details
+    ollama_payload["model"] = openai_payload.get("model")
+    ollama_payload["messages"] = openai_payload.get("messages")
+    ollama_payload["stream"] = openai_payload.get("stream", False)
+
+    # If there are advanced parameters in the payload, format them in Ollama's options field
+    ollama_options = {}
+
+    # Handle parameters which map directly
+    for param in ["temperature", "top_p", "seed"]:
+        if param in openai_payload:
+            ollama_options[param] = openai_payload[param]
+
+    # Map OpenAI's `max_completion_tokens` / `max_tokens` -> Ollama's `num_predict`
+    if "max_completion_tokens" in openai_payload:
+        ollama_options["num_predict"] = openai_payload["max_completion_tokens"]
+    elif "max_tokens" in openai_payload:
+        ollama_options["num_predict"] = openai_payload["max_tokens"]
+
+    # Handle frequency / presence_penalty, which needs renaming and checking
+    if "frequency_penalty" in openai_payload:
+        ollama_options["repeat_penalty"] = openai_payload["frequency_penalty"]
+
+    if "presence_penalty" in openai_payload and "penalty" not in ollama_options:
+        # We assume presence penalty maps to a similar concept in Ollama; this needs custom handling if such an option exists.
+        ollama_options["new_topic_penalty"] = openai_payload["presence_penalty"]
+
+    # Add options to payload if any have been set
+    if ollama_options:
+        ollama_payload["options"] = ollama_options
+
+    return ollama_payload

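A worked example of the mapping above (all values illustrative): directly-mapped parameters land in `options`, `max_tokens` becomes `num_predict`, and `frequency_penalty` is renamed to `repeat_penalty`:

```python
from open_webui.utils.payload import convert_payload_openai_to_ollama

convert_payload_openai_to_ollama({
    "model": "llama3",
    "messages": [{"role": "user", "content": "Hi"}],
    "temperature": 0.7,
    "max_tokens": 64,
    "frequency_penalty": 1.1,
})
# -> {
#     "model": "llama3",
#     "messages": [{"role": "user", "content": "Hi"}],
#     "stream": False,
#     "options": {"temperature": 0.7, "num_predict": 64, "repeat_penalty": 1.1},
# }
```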
+ 32 - 0
backend/open_webui/utils/response.py

@@ -0,0 +1,32 @@
+import json
+from open_webui.utils.misc import (
+    openai_chat_chunk_message_template,
+    openai_chat_completion_message_template,
+)
+
+
+def convert_response_ollama_to_openai(ollama_response: dict) -> dict:
+    model = ollama_response.get("model", "ollama")
+    message_content = ollama_response.get("message", {}).get("content", "")
+
+    response = openai_chat_completion_message_template(model, message_content)
+    return response
+
+
+async def convert_streaming_response_ollama_to_openai(ollama_streaming_response):
+    async for data in ollama_streaming_response.body_iterator:
+        data = json.loads(data)
+
+        model = data.get("model", "ollama")
+        message_content = data.get("message", {}).get("content", "")
+        done = data.get("done", False)
+
+        data = openai_chat_chunk_message_template(
+            model, message_content if not done else None
+        )
+
+        line = f"data: {json.dumps(data)}\n\n"
+        if done:
+            line += "data: [DONE]\n\n"
+
+        yield line

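To see the SSE framing this generator produces, feed it a fake Ollama stream (the stand-in class below is hypothetical; only `body_iterator` matters to the converter). Each frame becomes an OpenAI-style `data:` line, and the final `done` frame yields a stop chunk followed by `data: [DONE]`:

```python
import asyncio
import json

from open_webui.utils.response import convert_streaming_response_ollama_to_openai


class FakeOllamaStream:
    """Hypothetical stand-in exposing the body_iterator the converter reads."""

    def __init__(self, frames):
        async def gen():
            for frame in frames:
                yield json.dumps(frame)

        self.body_iterator = gen()


async def demo():
    frames = [
        {"model": "llama3", "message": {"content": "Hel"}, "done": False},
        {"model": "llama3", "message": {"content": "lo"}, "done": False},
        {"model": "llama3", "message": {"content": ""}, "done": True},
    ]
    async for line in convert_streaming_response_ollama_to_openai(
        FakeOllamaStream(frames)
    ):
        print(line, end="")


asyncio.run(demo())
```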
+ 14 - 2
package-lock.json

@@ -1,12 +1,12 @@
 {
 	"name": "open-webui",
-	"version": "0.3.22",
+	"version": "0.3.23",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.3.22",
+			"version": "0.3.23",
 			"dependencies": {
 				"@codemirror/lang-javascript": "^6.2.2",
 				"@codemirror/lang-python": "^6.1.6",
@@ -32,6 +32,7 @@
 				"katex": "^0.16.9",
 				"marked": "^9.1.0",
 				"mermaid": "^10.9.1",
+				"paneforge": "^0.0.6",
 				"pyodide": "^0.26.1",
 				"socket.io-client": "^4.2.0",
 				"sortablejs": "^1.15.2",
@@ -6986,6 +6987,17 @@
 				"url": "https://github.com/sponsors/sindresorhus"
 			}
 		},
+		"node_modules/paneforge": {
+			"version": "0.0.6",
+			"resolved": "https://registry.npmjs.org/paneforge/-/paneforge-0.0.6.tgz",
+			"integrity": "sha512-jYeN/wdREihja5c6nK3S5jritDQ+EbCqC5NrDo97qCZzZ9GkmEcN5C0ZCjF4nmhBwkDKr6tLIgz4QUKWxLXjAw==",
+			"dependencies": {
+				"nanoid": "^5.0.4"
+			},
+			"peerDependencies": {
+				"svelte": "^4.0.0 || ^5.0.0-next.1"
+			}
+		},
 		"node_modules/parent-module": {
 			"version": "1.0.1",
 			"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",

+ 2 - 1
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.3.22",
+	"version": "0.3.23",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",
@@ -72,6 +72,7 @@
 		"katex": "^0.16.9",
 		"marked": "^9.1.0",
 		"mermaid": "^10.9.1",
+		"paneforge": "^0.0.6",
 		"pyodide": "^0.26.1",
 		"socket.io-client": "^4.2.0",
 		"sortablejs": "^1.15.2",

+ 132 - 105
src/lib/components/chat/Chat.svelte

@@ -2,6 +2,7 @@
 	import { v4 as uuidv4 } from 'uuid';
 	import { toast } from 'svelte-sonner';
 	import mermaid from 'mermaid';
+	import { PaneGroup, Pane, PaneResizer } from 'paneforge';
 
 	import { getContext, onDestroy, onMount, tick } from 'svelte';
 	import { goto } from '$app/navigation';
@@ -26,7 +27,9 @@
 		showControls,
 		showCallOverlay,
 		currentChatPage,
-		temporaryChatEnabled
+		temporaryChatEnabled,
+		mobile,
+		showOverview
 	} from '$lib/stores';
 	import {
 		convertMessagesToHistory,
@@ -64,12 +67,14 @@
 	import Navbar from '$lib/components/layout/Navbar.svelte';
 	import ChatControls from './ChatControls.svelte';
 	import EventConfirmDialog from '../common/ConfirmDialog.svelte';
+	import EllipsisVertical from '../icons/EllipsisVertical.svelte';
 
 	const i18n: Writable<i18nType> = getContext('i18n');
 
 	export let chatIdProp = '';
 	let loaded = false;
 	const eventTarget = new EventTarget();
+	let controlPane;
 
 	let stopResponseFlag = false;
 	let autoScroll = true;
@@ -279,6 +284,29 @@
 				await goto('/');
 			}
 		}
+
+		showControls.subscribe(async (value) => {
+			if (controlPane && !$mobile) {
+				try {
+					if (value) {
+						controlPane.resize(
+							parseInt(localStorage.getItem('chat-controls-size') || '35')
+								? parseInt(localStorage.getItem('chat-controls-size') || '35')
+								: 35
+						);
+					} else {
+						controlPane.resize(0);
+					}
+				} catch (e) {
+					// ignore
+				}
+			}
+
+			if (!value) {
+				showCallOverlay.set(false);
+				showOverview.set(false);
+			}
+		});
 	});
 
 	onDestroy(() => {
@@ -1764,113 +1792,112 @@
 			{initNewChat}
 		/>
 
-		{#if $banners.length > 0 && messages.length === 0 && !$chatId && selectedModels.length <= 1}
-			<div
-				class="absolute top-[4.25rem] w-full {$showSidebar
-					? 'md:max-w-[calc(100%-260px)]'
-					: ''} {$showControls ? 'lg:pr-[26rem]' : ''} z-20"
-			>
-				<div class=" flex flex-col gap-1 w-full">
-					{#each $banners.filter( (b) => (b.dismissible ? !JSON.parse(localStorage.getItem('dismissedBannerIds') ?? '[]').includes(b.id) : true) ) as banner}
-						<Banner
-							{banner}
-							on:dismiss={(e) => {
-								const bannerId = e.detail;
-
-								localStorage.setItem(
-									'dismissedBannerIds',
-									JSON.stringify(
-										[
-											bannerId,
-											...JSON.parse(localStorage.getItem('dismissedBannerIds') ?? '[]')
-										].filter((id) => $banners.find((b) => b.id === id))
-									)
-								);
+		<PaneGroup direction="horizontal" class="w-full h-full">
+			<Pane defaultSize={50} class="h-full flex w-full relative">
+				{#if $banners.length > 0 && messages.length === 0 && !$chatId && selectedModels.length <= 1}
+					<div class="absolute top-3 left-0 right-0 w-full z-20">
+						<div class=" flex flex-col gap-1 w-full">
+							{#each $banners.filter( (b) => (b.dismissible ? !JSON.parse(localStorage.getItem('dismissedBannerIds') ?? '[]').includes(b.id) : true) ) as banner}
+								<Banner
+									{banner}
+									on:dismiss={(e) => {
+										const bannerId = e.detail;
+
+										localStorage.setItem(
+											'dismissedBannerIds',
+											JSON.stringify(
+												[
+													bannerId,
+													...JSON.parse(localStorage.getItem('dismissedBannerIds') ?? '[]')
+												].filter((id) => $banners.find((b) => b.id === id))
+											)
+										);
+									}}
+								/>
+							{/each}
+						</div>
+					</div>
+				{/if}
+
+				<div class="flex flex-col flex-auto z-10 w-full">
+					<div
+						class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden"
+						id="messages-container"
+						bind:this={messagesContainerElement}
+						on:scroll={(e) => {
+							autoScroll =
+								messagesContainerElement.scrollHeight - messagesContainerElement.scrollTop <=
+								messagesContainerElement.clientHeight + 5;
+						}}
+					>
+						<div class=" h-full w-full flex flex-col {chatIdProp ? 'py-4' : 'pt-2 pb-4'}">
+							<Messages
+								chatId={$chatId}
+								{selectedModels}
+								{processing}
+								bind:history
+								bind:messages
+								bind:autoScroll
+								bind:prompt
+								bottomPadding={files.length > 0}
+								{sendPrompt}
+								{continueGeneration}
+								{regenerateResponse}
+								{mergeResponses}
+								{chatActionHandler}
+								{showMessage}
+							/>
+						</div>
+					</div>
+
+					<div class="">
+						<MessageInput
+							bind:files
+							bind:prompt
+							bind:autoScroll
+							bind:selectedToolIds
+							bind:webSearchEnabled
+							bind:atSelectedModel
+							availableToolIds={selectedModelIds.reduce((a, e, i, arr) => {
+								const model = $models.find((m) => m.id === e);
+								if (model?.info?.meta?.toolIds ?? false) {
+									return [...new Set([...a, ...model.info.meta.toolIds])];
+								}
+								return a;
+							}, [])}
+							transparentBackground={$settings?.backgroundImageUrl ?? false}
+							{selectedModels}
+							{messages}
+							{submitPrompt}
+							{stopResponse}
+							on:call={async () => {
+								await showControls.set(true);
 							}}
 						/>
-					{/each}
+					</div>
 				</div>
-			</div>
-		{/if}
+			</Pane>
 
-		<div class="flex flex-col flex-auto z-10">
-			<div
-				class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden {$showControls
-					? 'lg:pr-[26rem]'
-					: ''}"
-				id="messages-container"
-				bind:this={messagesContainerElement}
-				on:scroll={(e) => {
-					autoScroll =
-						messagesContainerElement.scrollHeight - messagesContainerElement.scrollTop <=
-						messagesContainerElement.clientHeight + 5;
-				}}
-			>
-				<div class=" h-full w-full flex flex-col {chatIdProp ? 'py-4' : 'pt-2 pb-4'}">
-					<Messages
-						chatId={$chatId}
-						{selectedModels}
-						{processing}
-						bind:history
-						bind:messages
-						bind:autoScroll
-						bind:prompt
-						bottomPadding={files.length > 0}
-						{sendPrompt}
-						{continueGeneration}
-						{regenerateResponse}
-						{mergeResponses}
-						{chatActionHandler}
-						{showMessage}
-					/>
-				</div>
-			</div>
-
-			<div class={$showControls ? 'lg:pr-[26rem]' : ''}>
-				<MessageInput
-					bind:files
-					bind:prompt
-					bind:autoScroll
-					bind:selectedToolIds
-					bind:webSearchEnabled
-					bind:atSelectedModel
-					availableToolIds={selectedModelIds.reduce((a, e, i, arr) => {
-						const model = $models.find((m) => m.id === e);
-						if (model?.info?.meta?.toolIds ?? false) {
-							return [...new Set([...a, ...model.info.meta.toolIds])];
-						}
-						return a;
-					}, [])}
-					transparentBackground={$settings?.backgroundImageUrl ?? false}
-					{selectedModels}
-					{messages}
-					{submitPrompt}
-					{stopResponse}
-					on:call={() => {
-						showControls.set(true);
-					}}
-				/>
-			</div>
-		</div>
+			<ChatControls
+				models={selectedModelIds.reduce((a, e, i, arr) => {
+					const model = $models.find((m) => m.id === e);
+					if (model) {
+						return [...a, model];
+					}
+					return a;
+				}, [])}
+				bind:history
+				bind:chatFiles
+				bind:params
+				bind:files
+				bind:pane={controlPane}
+				{submitPrompt}
+				{stopResponse}
+				{showMessage}
+				modelId={selectedModelIds?.at(0) ?? null}
+				chatId={$chatId}
+				{eventTarget}
+			/>
+		</PaneGroup>
 	</div>
 {/if}
-
-<ChatControls
-	models={selectedModelIds.reduce((a, e, i, arr) => {
-		const model = $models.find((m) => m.id === e);
-		if (model) {
-			return [...a, model];
-		}
-		return a;
-	}, [])}
-	bind:history
-	bind:chatFiles
-	bind:params
-	bind:files
-	{submitPrompt}
-	{stopResponse}
-	{showMessage}
-	modelId={selectedModelIds?.at(0) ?? null}
-	chatId={$chatId}
-	{eventTarget}
-/>

+ 94 - 61
src/lib/components/chat/ChatControls.svelte

@@ -10,6 +10,9 @@
 	import CallOverlay from './MessageInput/CallOverlay.svelte';
 	import Drawer from '../common/Drawer.svelte';
 	import Overview from './Overview.svelte';
+	import { Pane, PaneResizer } from 'paneforge';
+	import EllipsisVertical from '../icons/EllipsisVertical.svelte';
+	import { get } from 'svelte/store';
 
 	export let history;
 	export let models = [];
@@ -25,7 +28,9 @@
 	export let files;
 	export let modelId;
 
+	export let pane;
 	let largeScreen = false;
+
 	onMount(() => {
 		// listen to resize 1024px
 		const mediaQuery = window.matchMedia('(min-width: 1024px)');
@@ -35,6 +40,7 @@
 				largeScreen = true;
 			} else {
 				largeScreen = false;
+				pane = null;
 			}
 		};
 
@@ -58,75 +64,32 @@
 
 <SvelteFlowProvider>
 	{#if !largeScreen}
-		{#if $showCallOverlay}
-			<div class=" absolute w-full h-screen max-h-[100dvh] flex z-[999] overflow-hidden">
-				<div
-					class="absolute w-full h-screen max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
-				>
-					<CallOverlay
-						bind:files
-						{submitPrompt}
-						{stopResponse}
-						{modelId}
-						{chatId}
-						{eventTarget}
-						on:close={() => {
-							showControls.set(false);
-						}}
-					/>
-				</div>
-			</div>
-		{:else if $showControls}
+		{#if $showControls}
 			<Drawer
 				show={$showControls}
 				on:close={() => {
 					showControls.set(false);
 				}}
 			>
-				<div class=" {$showOverview ? ' h-screen  w-screen' : 'px-6 py-4'} h-full">
-					{#if $showOverview}
-						<Overview
-							{history}
-							on:nodeclick={(e) => {
-								showMessage(e.detail.node.data.message);
-							}}
-							on:close={() => {
-								showControls.set(false);
-							}}
-						/>
-					{:else}
-						<Controls
-							on:close={() => {
-								showControls.set(false);
-							}}
-							{models}
-							bind:chatFiles
-							bind:params
-						/>
-					{/if}
-				</div>
-			</Drawer>
-		{/if}
-	{:else if $showControls}
-		<div class=" absolute bottom-0 right-0 z-20 h-full pointer-events-none">
-			<div class="pr-4 pt-14 pb-8 w-[26rem] h-full" in:slide={{ duration: 200, axis: 'x' }}>
 				<div
-					class="w-full h-full {$showOverview && !$showCallOverlay
-						? ' '
-						: 'px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850  border border-gray-50 dark:border-gray-800'}  rounded-lg z-50 pointer-events-auto overflow-y-auto scrollbar-hidden"
+					class=" {$showCallOverlay || $showOverview ? ' h-screen  w-screen' : 'px-6 py-4'} h-full"
 				>
 					{#if $showCallOverlay}
-						<CallOverlay
-							bind:files
-							{submitPrompt}
-							{stopResponse}
-							{modelId}
-							{chatId}
-							{eventTarget}
-							on:close={() => {
-								showControls.set(false);
-							}}
-						/>
+						<div
+							class=" h-full max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
+						>
+							<CallOverlay
+								bind:files
+								{submitPrompt}
+								{stopResponse}
+								{modelId}
+								{chatId}
+								{eventTarget}
+								on:close={() => {
+									showControls.set(false);
+								}}
+							/>
+						</div>
 					{:else if $showOverview}
 						<Overview
 							{history}
@@ -148,7 +111,77 @@
 						/>
 					{/if}
 				</div>
+			</Drawer>
+		{/if}
+	{:else}
+		<!-- if $showControls -->
+		<PaneResizer class="relative flex w-2 items-center justify-center bg-background">
+			<div class="z-10 flex h-7 w-5 items-center justify-center rounded-sm">
+				<EllipsisVertical />
 			</div>
-		</div>
+		</PaneResizer>
+		<Pane
+			bind:pane
+			defaultSize={$showControls
+				? parseInt(localStorage.getItem('chat-controls-size') || '35')
+					? parseInt(localStorage.getItem('chat-controls-size') || '35')
+					: 35
+				: 0}
+			onResize={(size) => {
+				if (size === 0) {
+					showControls.set(false);
+				} else {
+					if (!$showControls) {
+						showControls.set(true);
+					}
+					localStorage.setItem('chat-controls-size', size);
+				}
+			}}
+		>
+			{#if $showControls}
+				<div class="pr-4 pb-8 flex max-h-full min-h-full">
+					<div
+						class="w-full {$showOverview && !$showCallOverlay
+							? ' '
+							: 'px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850  border border-gray-50 dark:border-gray-800'}  rounded-lg z-50 pointer-events-auto overflow-y-auto scrollbar-hidden"
+					>
+						{#if $showCallOverlay}
+							<div class="w-full h-full flex justify-center">
+								<CallOverlay
+									bind:files
+									{submitPrompt}
+									{stopResponse}
+									{modelId}
+									{chatId}
+									{eventTarget}
+									on:close={() => {
+										showControls.set(false);
+									}}
+								/>
+							</div>
+						{:else if $showOverview}
+							<Overview
+								{history}
+								on:nodeclick={(e) => {
+									showMessage(e.detail.node.data.message);
+								}}
+								on:close={() => {
+									showControls.set(false);
+								}}
+							/>
+						{:else}
+							<Controls
+								on:close={() => {
+									showControls.set(false);
+								}}
+								{models}
+								bind:chatFiles
+								bind:params
+							/>
+						{/if}
+					</div>
+				</div>
+			{/if}
+		</Pane>
 	{/if}
 	{/if}
 </SvelteFlowProvider>

+ 82 - 60
src/lib/components/chat/MessageInput/CallOverlay.svelte

@@ -220,7 +220,9 @@
 	};
 
 	const startRecording = async () => {
-		audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+		if (!audioStream) {
+			audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+		}
 		mediaRecorder = new MediaRecorder(audioStream);
 
 		mediaRecorder.onstart = () => {
@@ -236,7 +238,7 @@
 		};
 
 		mediaRecorder.onstop = (e) => {
-			console.log('Recording stopped', e);
+			console.log('Recording stopped', audioStream, e);
 			stopRecordingCallback();
 		};
 
@@ -244,10 +246,11 @@
 	};
 
 	const stopAudioStream = async () => {
-		if (audioStream) {
-			const tracks = audioStream.getTracks();
-			tracks.forEach((track) => track.stop());
-		}
+		if (!audioStream) return;
+
+		audioStream.getAudioTracks().forEach(function (track) {
+			track.stop();
+		});
 
 		audioStream = null;
 	};
@@ -525,6 +528,60 @@
 		console.log(`Audio monitoring and playing stopped for message ID ${id}`);
 	};
 
+	const chatStartHandler = async (e) => {
+		const { id } = e.detail;
+
+		chatStreaming = true;
+
+		if (currentMessageId !== id) {
+			console.log(`Received chat start event for message ID ${id}`);
+
+			currentMessageId = id;
+			if (audioAbortController) {
+				audioAbortController.abort();
+			}
+			audioAbortController = new AbortController();
+
+			assistantSpeaking = true;
+			// Start monitoring and playing audio for the message ID
+			monitorAndPlayAudio(id, audioAbortController.signal);
+		}
+	};
+
+	const chatEventHandler = async (e) => {
+		const { id, content } = e.detail;
+		// "id" here is message id
+		// if "id" is not the same as "currentMessageId" then do not process
+		// "content" here is a sentence from the assistant,
+		// there will be many sentences for the same "id"
+
+		if (currentMessageId === id) {
+			console.log(`Received chat event for message ID ${id}: ${content}`);
+
+			try {
+				if (messages[id] === undefined) {
+					messages[id] = [content];
+				} else {
+					messages[id].push(content);
+				}
+
+				console.log(content);
+
+				fetchAudio(content);
+			} catch (error) {
+				console.error('Failed to fetch or play audio:', error);
+			}
+		}
+	};
+
+	const chatFinishHandler = async (e) => {
+		const { id, content } = e.detail;
+		// "content" here is the entire message from the assistant
+		finishedMessages[id] = true;
+
+		chatStreaming = false;
+	};
+
 	onMount(async () => {
 		const setWakeLock = async () => {
 			try {
@@ -558,65 +615,15 @@
 
 		startRecording();
 
-		const chatStartHandler = async (e) => {
-			const { id } = e.detail;
-
-			chatStreaming = true;
-
-			if (currentMessageId !== id) {
-				console.log(`Received chat start event for message ID ${id}`);
-
-				currentMessageId = id;
-				if (audioAbortController) {
-					audioAbortController.abort();
-				}
-				audioAbortController = new AbortController();
-
-				assistantSpeaking = true;
-				// Start monitoring and playing audio for the message ID
-				monitorAndPlayAudio(id, audioAbortController.signal);
-			}
-		};
-
-		const chatEventHandler = async (e) => {
-			const { id, content } = e.detail;
-			// "id" here is message id
-			// if "id" is not the same as "currentMessageId" then do not process
-			// "content" here is a sentence from the assistant,
-			// there will be many sentences for the same "id"
-
-			if (currentMessageId === id) {
-				console.log(`Received chat event for message ID ${id}: ${content}`);
-
-				try {
-					if (messages[id] === undefined) {
-						messages[id] = [content];
-					} else {
-						messages[id].push(content);
-					}
-
-					console.log(content);
-
-					fetchAudio(content);
-				} catch (error) {
-					console.error('Failed to fetch or play audio:', error);
-				}
-			}
-		};
-
-		const chatFinishHandler = async (e) => {
-			const { id, content } = e.detail;
-			// "content" here is the entire message from the assistant
-			finishedMessages[id] = true;
-
-			chatStreaming = false;
-		};
-
 		eventTarget.addEventListener('chat:start', chatStartHandler);
 		eventTarget.addEventListener('chat:start', chatStartHandler);
 		eventTarget.addEventListener('chat', chatEventHandler);
 		eventTarget.addEventListener('chat:finish', chatFinishHandler);
 
 		return async () => {
+			await stopAllAudio();
+
+			stopAudioStream();
+
 			eventTarget.removeEventListener('chat:start', chatStartHandler);
 			eventTarget.removeEventListener('chat', chatEventHandler);
 			eventTarget.removeEventListener('chat:finish', chatFinishHandler);
@@ -633,6 +640,17 @@
 
 	onDestroy(async () => {
 		await stopAllAudio();
+		stopAudioStream();
+
+		eventTarget.removeEventListener('chat:start', chatStartHandler);
+		eventTarget.removeEventListener('chat', chatEventHandler);
+		eventTarget.removeEventListener('chat:finish', chatFinishHandler);
+
+		audioAbortController.abort();
+		await tick();
+
+		await stopAllAudio();
+
 		await stopRecordingCallback(false);
 		await stopCamera();
 	});
@@ -924,6 +942,10 @@
 					on:click={async () => {
 						await stopAudioStream();
 						await stopVideoStream();
+
+						console.log(audioStream);
+						console.log(cameraStream);
+
 						showCallOverlay.set(false);
 						dispatch('close');
 					}}

+ 9 - 1
src/lib/components/chat/MessageInput/VoiceRecording.svelte

@@ -44,6 +44,7 @@
 		return `${minutes}:${formattedSeconds}`;
 	};
 
+	let stream;
 	let speechRecognition;
 
 	let mediaRecorder;
@@ -159,7 +160,7 @@
 	const startRecording = async () => {
 		startDurationCounter();
 
-		const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+		stream = await navigator.mediaDevices.getUserMedia({ audio: true });
 		mediaRecorder = new MediaRecorder(stream);
 		mediaRecorder.onstart = () => {
 			console.log('Recording started');
@@ -251,6 +252,13 @@
 		}
 		stopDurationCounter();
 		audioChunks = [];
+
+		if (stream) {
+			const tracks = stream.getTracks();
+			tracks.forEach((track) => track.stop());
+		}
+
+		stream = null;
 	};
 
 	const confirmRecording = async () => {

+ 1 - 1
src/lib/components/chat/Messages/Citations.svelte

@@ -48,7 +48,7 @@
 		{#each _citations as citation, idx}
 			<div class="flex gap-1 text-xs font-semibold">
 				<button
-					class="flex dark:text-gray-300 py-1 px-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-xl"
+					class="flex dark:text-gray-300 py-1 px-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-xl max-w-96"
 					on:click={() => {
 						showCitationModal = true;
 						selectedCitation = citation;

+ 2 - 2
src/lib/components/layout/Navbar.svelte

@@ -109,8 +109,8 @@
 					<Tooltip content={$i18n.t('Controls')}>
 						<button
 							class=" flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
-							on:click={() => {
-								showControls.set(!$showControls);
+							on:click={async () => {
+								await showControls.set(!$showControls);
 							}}
 							aria-label="Controls"
 						>