
Merge branch 'open-webui:dev' into dev

gabriel-ecegi 7 months ago
parent
commit
54f6ae8fb5

+ 3 - 3
backend/open_webui/functions.py

@@ -65,12 +65,12 @@ def get_function_module_by_id(request: Request, pipe_id: str):
     return function_module
 
 
-async def get_function_models():
+async def get_function_models(request):
     pipes = Functions.get_functions_by_type("pipe", active_only=True)
     pipe_models = []
 
     for pipe in pipes:
-        function_module = get_function_module_by_id(pipe.id)
+        function_module = get_function_module_by_id(request, pipe.id)
 
         # Check if function is a manifold
         if hasattr(function_module, "pipes"):
@@ -253,7 +253,7 @@ async def generate_function_chat_completion(
         form_data = apply_model_system_prompt_to_body(params, form_data, user)
 
     pipe_id = get_pipe_id(form_data)
-    function_module = get_function_module_by_id(pipe_id)
+    function_module = get_function_module_by_id(request, pipe_id)
 
     pipe = function_module.pipe
     params = get_function_params(function_module, form_data, user, extra_params)
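Callers of `get_function_models` must now pass the FastAPI `Request` through, since `get_function_module_by_id` takes it as its first argument. A minimal sketch of the updated call chain; the endpoint name and wiring are illustrative, not part of the commit:

    from fastapi import Request

    # Hypothetical endpoint: anything that lists pipe-function models now
    # forwards its Request so the module loader can reach application state.
    async def list_models(request: Request):
        # Internally this calls get_function_module_by_id(request, pipe.id)
        # for every active pipe, exactly as in the hunk above.
        return await get_function_models(request)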

+ 4 - 2
backend/open_webui/retrieval/loaders/main.py

@@ -1,6 +1,7 @@
 import requests
 import logging
 import ftfy
+import sys
 
 from langchain_community.document_loaders import (
     BSHTMLLoader,
@@ -18,8 +19,9 @@ from langchain_community.document_loaders import (
     YoutubeLoader,
 )
 from langchain_core.documents import Document
-from open_webui.env import SRC_LOG_LEVELS
+from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL
 
+logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
 
@@ -106,7 +108,7 @@ class TikaLoader:
             if "Content-Type" in raw_metadata:
                 headers["Content-Type"] = raw_metadata["Content-Type"]
 
-            log.info("Tika extracted text: %s", text)
+            log.debug("Tika extracted text: %s", text)
 
             return [Document(page_content=text, metadata=headers)]
         else:
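Two things change here: `basicConfig` guarantees a stdout handler exists on the root logger and sets the default level from `GLOBAL_LOG_LEVEL`, while the module logger's own `SRC_LOG_LEVELS["RAG"]` level still decides what this file emits; the Tika payload also drops from info to debug so full extracted text no longer floods normal logs. A standalone sketch of the logging pattern, with illustrative level values:

    import logging
    import sys

    # Root logger: stdout handler plus default level
    # (GLOBAL_LOG_LEVEL in the commit; "INFO" here is illustrative).
    logging.basicConfig(stream=sys.stdout, level="INFO")

    log = logging.getLogger("open_webui.retrieval.loaders.main")
    # The module logger's own level overrides the inherited root level,
    # so this module can emit debug records even when the default is INFO.
    log.setLevel("DEBUG")

    log.debug("Tika extracted text: %s", "sample text")  # visible at debug level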

+ 16 - 7
backend/open_webui/routers/files.py

@@ -5,6 +5,7 @@ from pathlib import Path
 from typing import Optional
 from pydantic import BaseModel
 import mimetypes
+from urllib.parse import quote
 
 from open_webui.storage.provider import Storage
 
@@ -222,11 +223,15 @@ async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
 
             # Check if the file already exists in the cache
             if file_path.is_file():
-                print(f"file_path: {file_path}")
+                # Handle Unicode filenames
+                filename = file.meta.get("name", file.filename)
+                encoded_filename = quote(filename)  # RFC5987 encoding
                 headers = {
-                    "Content-Disposition": f'attachment; filename="{file.meta.get("name", file.filename)}"'
+                    "Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}"
                 }
+
                 return FileResponse(file_path, headers=headers)
+
             else:
                 raise HTTPException(
                     status_code=status.HTTP_404_NOT_FOUND,
@@ -283,16 +288,20 @@ async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
 
     if file and (file.user_id == user.id or user.role == "admin"):
         file_path = file.path
+
+        # Handle Unicode filenames
+        filename = file.meta.get("name", file.filename)
+        encoded_filename = quote(filename)  # RFC5987 encoding
+        headers = {
+            "Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}"
+        }
+
         if file_path:
             file_path = Storage.get_file(file_path)
             file_path = Path(file_path)
 
             # Check if the file already exists in the cache
             if file_path.is_file():
-                print(f"file_path: {file_path}")
-                headers = {
-                    "Content-Disposition": f'attachment; filename="{file.meta.get("name", file.filename)}"'
-                }
                 return FileResponse(file_path, headers=headers)
             else:
                 raise HTTPException(
@@ -311,7 +320,7 @@ async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
             return StreamingResponse(
                 generator(),
                 media_type="text/plain",
-                headers={"Content-Disposition": f"attachment; filename={file_name}"},
+                headers=headers,
             )
     else:
         raise HTTPException(
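The replaced header used `filename="..."`, which mangles non-ASCII names; the new `filename*=UTF-8''...` form follows RFC 5987 (referenced by RFC 6266 for `Content-Disposition`), percent-encoding the UTF-8 bytes of the name. A quick sketch of the encoding step; the sample filename is illustrative:

    from urllib.parse import quote

    filename = "报告 v2.pdf"  # illustrative Unicode filename
    encoded = quote(filename)  # percent-encodes the UTF-8 bytes
    header = f"attachment; filename*=UTF-8''{encoded}"
    print(header)
    # attachment; filename*=UTF-8''%E6%8A%A5%E5%91%8A%20v2.pdf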

+ 5 - 3
backend/open_webui/routers/tasks.py

@@ -186,9 +186,10 @@ async def generate_title(
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
+        log.error("Exception occurred", exc_info=True)
         return JSONResponse(
             status_code=status.HTTP_400_BAD_REQUEST,
-            content={"detail": str(e)},
+            content={"detail": "An internal error has occurred."},
         )
 
 
@@ -248,9 +249,10 @@ async def generate_chat_tags(
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
+        log.error(f"Error generating chat completion: {e}")
         return JSONResponse(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            content={"detail": str(e)},
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            content={"detail": "An internal error has occurred."},
         )
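Both handlers now log the real exception server-side and return only a generic message, so upstream error details no longer leak to clients; the second handler also corrects the status code to 500, since a failed completion is a server-side fault rather than a bad request. A minimal sketch of the shared pattern, with hypothetical names:

    import logging
    from fastapi import status
    from fastapi.responses import JSONResponse

    log = logging.getLogger(__name__)

    async def do_work():  # hypothetical stand-in for generate_chat_completion
        raise RuntimeError("upstream failure with sensitive details")

    async def safe_endpoint():
        try:
            return await do_work()
        except Exception:
            # Full traceback goes to the server log only.
            log.error("Exception occurred", exc_info=True)
            return JSONResponse(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                content={"detail": "An internal error has occurred."},
            )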
 
 

+ 0 - 2
backend/open_webui/socket/main.py

@@ -1,5 +1,3 @@
-# TODO: move socket to webui app
-
 import asyncio
 import socketio
 import logging

+ 32 - 0
backend/open_webui/utils/middleware.py

@@ -362,7 +362,39 @@ async def chat_completion_files_handler(
     return body, {"sources": sources}
 
 
+def apply_params_to_form_data(form_data, model):
+    params = form_data.pop("params", {})
+    if model.get("ollama"):
+        form_data["options"] = params
+
+        if "format" in params:
+            form_data["format"] = params["format"]
+
+        if "keep_alive" in params:
+            form_data["keep_alive"] = params["keep_alive"]
+    else:
+        if "seed" in params:
+            form_data["seed"] = params["seed"]
+
+        if "stop" in params:
+            form_data["stop"] = params["stop"]
+
+        if "temperature" in params:
+            form_data["temperature"] = params["temperature"]
+
+        if "top_p" in params:
+            form_data["top_p"] = params["top_p"]
+
+        if "frequency_penalty" in params:
+            form_data["frequency_penalty"] = params["frequency_penalty"]
+
+    return form_data
+
+
 async def process_chat_payload(request, form_data, user, model):
+    form_data = apply_params_to_form_data(form_data, model)
+    log.debug(f"form_data: {form_data}")
+
     metadata = {
         "chat_id": form_data.pop("chat_id", None),
         "message_id": form_data.pop("id", None),

+ 1 - 1
backend/open_webui/utils/models.py

@@ -52,7 +52,7 @@ async def get_all_base_models(request: Request):
             for model in ollama_models["models"]
         ]
 
-    function_models = await get_function_models()
+    function_models = await get_function_models(request)
     models = function_models + openai_models + ollama_models
 
     return models

+ 23 - 428
src/lib/components/chat/Chat.svelte

@@ -885,7 +885,6 @@
 			return;
 		}
 
-		let _responses = [];
 		prompt = '';
 		await tick();
 
@@ -937,9 +936,7 @@
 		chatInput?.focus();
 
 		saveSessionSelectedModels();
-		_responses = await sendPrompt(userPrompt, userMessageId, { newChat: true });
-
-		return _responses;
+		await sendPrompt(userPrompt, userMessageId, { newChat: true });
 	};
 
 	const sendPrompt = async (
@@ -956,7 +953,6 @@
 			await initChatHandler();
 		}
 
-		let _responses: string[] = [];
 		// If modelId is provided, use it, else use selected model
 		let selectedModelIds = modelId
 			? [modelId]
@@ -1057,17 +1053,7 @@
 						await getWebSearchResults(model.id, parentId, responseMessageId);
 					}
 
-					let _response = null;
-
-					// if (model?.owned_by === 'ollama') {
-					// 	_response = await sendPromptOllama(model, prompt, responseMessageId, _chatId);
-					// } else if (model) {
-					// }
-
-					_response = await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
-
-					_responses.push(_response);
-
+					await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
 					if (chatEventEmitter) clearInterval(chatEventEmitter);
 				} else {
 					toast.error($i18n.t(`Model {{modelId}} not found`, { modelId }));
@@ -1077,389 +1063,6 @@
 
 		currentChatPage.set(1);
 		chats.set(await getChatList(localStorage.token, $currentChatPage));
-
-		return _responses;
-	};
-
-	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
-		let _response: string | null = null;
-
-		const responseMessage = history.messages[responseMessageId];
-		const userMessage = history.messages[responseMessage.parentId];
-
-		// Wait until history/message have been updated
-		await tick();
-
-		// Scroll down
-		scrollToBottom();
-
-		const messagesBody = [
-			params?.system || $settings.system || (responseMessage?.userContext ?? null)
-				? {
-						role: 'system',
-						content: `${promptTemplate(
-							params?.system ?? $settings?.system ?? '',
-							$user.name,
-							$settings?.userLocation
-								? await getAndUpdateUserLocation(localStorage.token)
-								: undefined
-						)}${
-							(responseMessage?.userContext ?? null)
-								? `\n\nUser Context:\n${responseMessage?.userContext ?? ''}`
-								: ''
-						}`
-					}
-				: undefined,
-			...createMessagesList(responseMessageId)
-		]
-			.filter((message) => message?.content?.trim())
-			.map((message) => {
-				// Prepare the base message object
-				const baseMessage = {
-					role: message.role,
-					content: message?.merged?.content ?? message.content
-				};
-
-				// Extract and format image URLs if any exist
-				const imageUrls = message.files
-					?.filter((file) => file.type === 'image')
-					.map((file) => file.url.slice(file.url.indexOf(',') + 1));
-
-				// Add images array only if it contains elements
-				if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
-					baseMessage.images = imageUrls;
-				}
-				return baseMessage;
-			});
-
-		let lastImageIndex = -1;
-
-		// Find the index of the last object with images
-		messagesBody.forEach((item, index) => {
-			if (item.images) {
-				lastImageIndex = index;
-			}
-		});
-
-		// Remove images from all but the last one
-		messagesBody.forEach((item, index) => {
-			if (index !== lastImageIndex) {
-				delete item.images;
-			}
-		});
-
-		let files = JSON.parse(JSON.stringify(chatFiles));
-		if (model?.info?.meta?.knowledge ?? false) {
-			// Only initialize and add status if knowledge exists
-			responseMessage.statusHistory = [
-				{
-					action: 'knowledge_search',
-					description: $i18n.t(`Searching Knowledge for "{{searchQuery}}"`, {
-						searchQuery: userMessage.content
-					}),
-					done: false
-				}
-			];
-			files.push(
-				...model.info.meta.knowledge.map((item) => {
-					if (item?.collection_name) {
-						return {
-							id: item.collection_name,
-							name: item.name,
-							legacy: true
-						};
-					} else if (item?.collection_names) {
-						return {
-							name: item.name,
-							type: 'collection',
-							collection_names: item.collection_names,
-							legacy: true
-						};
-					} else {
-						return item;
-					}
-				})
-			);
-			history.messages[responseMessageId] = responseMessage;
-		}
-		files.push(
-			...(userMessage?.files ?? []).filter((item) =>
-				['doc', 'file', 'collection'].includes(item.type)
-			),
-			...(responseMessage?.files ?? []).filter((item) => ['web_search_results'].includes(item.type))
-		);
-
-		// Remove duplicates
-		files = files.filter(
-			(item, index, array) =>
-				array.findIndex((i) => JSON.stringify(i) === JSON.stringify(item)) === index
-		);
-
-		scrollToBottom();
-
-		eventTarget.dispatchEvent(
-			new CustomEvent('chat:start', {
-				detail: {
-					id: responseMessageId
-				}
-			})
-		);
-
-		await tick();
-
-		const stream =
-			model?.info?.params?.stream_response ??
-			$settings?.params?.stream_response ??
-			params?.stream_response ??
-			true;
-
-		const [res, controller] = await generateChatCompletion(localStorage.token, {
-			stream: stream,
-			model: model.id,
-			messages: messagesBody,
-			format: $settings.requestFormat ?? undefined,
-			keep_alive: $settings.keepAlive ?? undefined,
-
-			tool_ids: selectedToolIds.length > 0 ? selectedToolIds : undefined,
-			files: files.length > 0 ? files : undefined,
-			session_id: $socket?.id,
-			chat_id: $chatId,
-			id: responseMessageId
-		});
-
-		if (res && res.ok) {
-			if (!stream) {
-				const response = await res.json();
-				console.log(response);
-
-				responseMessage.content = response.message.content;
-				responseMessage.info = {
-					eval_count: response.eval_count,
-					eval_duration: response.eval_duration,
-					load_duration: response.load_duration,
-					prompt_eval_count: response.prompt_eval_count,
-					prompt_eval_duration: response.prompt_eval_duration,
-					total_duration: response.total_duration
-				};
-				responseMessage.done = true;
-			} else {
-				console.log('controller', controller);
-
-				const reader = res.body
-					.pipeThrough(new TextDecoderStream())
-					.pipeThrough(splitStream('\n'))
-					.getReader();
-
-				while (true) {
-					const { value, done } = await reader.read();
-					if (done || stopResponseFlag || _chatId !== $chatId) {
-						responseMessage.done = true;
-						history.messages[responseMessageId] = responseMessage;
-
-						if (stopResponseFlag) {
-							controller.abort('User: Stop Response');
-						}
-
-						_response = responseMessage.content;
-						break;
-					}
-
-					try {
-						let lines = value.split('\n');
-
-						for (const line of lines) {
-							if (line !== '') {
-								console.log(line);
-								let data = JSON.parse(line);
-
-								if ('sources' in data) {
-									responseMessage.sources = data.sources;
-									// Only remove status if it was initially set
-									if (model?.info?.meta?.knowledge ?? false) {
-										responseMessage.statusHistory = responseMessage.statusHistory.filter(
-											(status) => status.action !== 'knowledge_search'
-										);
-									}
-									continue;
-								}
-
-								if ('detail' in data) {
-									throw data;
-								}
-
-								if (data.done == false) {
-									if (responseMessage.content == '' && data.message.content == '\n') {
-										continue;
-									} else {
-										responseMessage.content += data.message.content;
-
-										if (navigator.vibrate && ($settings?.hapticFeedback ?? false)) {
-											navigator.vibrate(5);
-										}
-
-										const messageContentParts = getMessageContentParts(
-											responseMessage.content,
-											$config?.audio?.tts?.split_on ?? 'punctuation'
-										);
-										messageContentParts.pop();
-
-										// dispatch only last sentence and make sure it hasn't been dispatched before
-										if (
-											messageContentParts.length > 0 &&
-											messageContentParts[messageContentParts.length - 1] !==
-												responseMessage.lastSentence
-										) {
-											responseMessage.lastSentence =
-												messageContentParts[messageContentParts.length - 1];
-											eventTarget.dispatchEvent(
-												new CustomEvent('chat', {
-													detail: {
-														id: responseMessageId,
-														content: messageContentParts[messageContentParts.length - 1]
-													}
-												})
-											);
-										}
-
-										history.messages[responseMessageId] = responseMessage;
-									}
-								} else {
-									responseMessage.done = true;
-
-									if (responseMessage.content == '') {
-										responseMessage.error = {
-											code: 400,
-											content: `Oops! No text generated from Ollama, Please try again.`
-										};
-									}
-
-									responseMessage.context = data.context ?? null;
-									responseMessage.info = {
-										total_duration: data.total_duration,
-										load_duration: data.load_duration,
-										sample_count: data.sample_count,
-										sample_duration: data.sample_duration,
-										prompt_eval_count: data.prompt_eval_count,
-										prompt_eval_duration: data.prompt_eval_duration,
-										eval_count: data.eval_count,
-										eval_duration: data.eval_duration
-									};
-
-									history.messages[responseMessageId] = responseMessage;
-
-									if ($settings.notificationEnabled && !document.hasFocus()) {
-										const notification = new Notification(`${model.id}`, {
-											body: responseMessage.content,
-											icon: `${WEBUI_BASE_URL}/static/favicon.png`
-										});
-									}
-
-									if ($settings?.responseAutoCopy ?? false) {
-										copyToClipboard(responseMessage.content);
-									}
-
-									if ($settings.responseAutoPlayback && !$showCallOverlay) {
-										await tick();
-										document.getElementById(`speak-button-${responseMessage.id}`)?.click();
-									}
-								}
-							}
-						}
-					} catch (error) {
-						console.log(error);
-						if ('detail' in error) {
-							toast.error(error.detail);
-						}
-						break;
-					}
-
-					if (autoScroll) {
-						scrollToBottom();
-					}
-				}
-			}
-		} else {
-			if (res !== null) {
-				const error = await res.json();
-				console.log(error);
-				if ('detail' in error) {
-					toast.error(error.detail);
-					responseMessage.error = { content: error.detail };
-				} else {
-					toast.error(error.error);
-					responseMessage.error = { content: error.error };
-				}
-			} else {
-				toast.error(
-					$i18n.t(`Uh-oh! There was an issue connecting to {{provider}}.`, { provider: 'Ollama' })
-				);
-				responseMessage.error = {
-					content: $i18n.t(`Uh-oh! There was an issue connecting to {{provider}}.`, {
-						provider: 'Ollama'
-					})
-				};
-			}
-			responseMessage.done = true;
-
-			if (responseMessage.statusHistory) {
-				responseMessage.statusHistory = responseMessage.statusHistory.filter(
-					(status) => status.action !== 'knowledge_search'
-				);
-			}
-		}
-		await saveChatHandler(_chatId);
-
-		history.messages[responseMessageId] = responseMessage;
-
-		await chatCompletedHandler(
-			_chatId,
-			model.id,
-			responseMessageId,
-			createMessagesList(responseMessageId)
-		);
-
-		stopResponseFlag = false;
-		await tick();
-
-		let lastMessageContentPart =
-			getMessageContentParts(
-				responseMessage.content,
-				$config?.audio?.tts?.split_on ?? 'punctuation'
-			)?.at(-1) ?? '';
-		if (lastMessageContentPart) {
-			eventTarget.dispatchEvent(
-				new CustomEvent('chat', {
-					detail: { id: responseMessageId, content: lastMessageContentPart }
-				})
-			);
-		}
-
-		eventTarget.dispatchEvent(
-			new CustomEvent('chat:finish', {
-				detail: {
-					id: responseMessageId,
-					content: responseMessage.content
-				}
-			})
-		);
-
-		if (autoScroll) {
-			scrollToBottom();
-		}
-
-		const messages = createMessagesList(responseMessageId);
-		if (messages.length == 2 && messages.at(-1).content !== '' && selectedModels[0] === model.id) {
-			window.history.replaceState(history.state, '', `/c/${_chatId}`);
-
-			const title = await generateChatTitle(messages);
-			await setChatTitle(_chatId, title);
-
-			if ($settings?.autoTags ?? true) {
-				await setChatTags(messages);
-			}
-		}
-
-		return _response;
 	};
 
 	const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
@@ -1582,21 +1185,21 @@
 									})
 						})),
 
-					// params: {
-					// 	...$settings?.params,
-					// 	...params,
-
-					// 	format: $settings.requestFormat ?? undefined,
-					// 	keep_alive: $settings.keepAlive ?? undefined,
-					// 	stop:
-					// 		(params?.stop ?? $settings?.params?.stop ?? undefined)
-					// 			? (
-					// 					params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop
-					// 				).map((str) =>
-					// 					decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
-					// 				)
-					// 			: undefined
-					// },
+					params: {
+						...$settings?.params,
+						...params,
+
+						format: $settings.requestFormat ?? undefined,
+						keep_alive: $settings.keepAlive ?? undefined,
+						stop:
+							(params?.stop ?? $settings?.params?.stop ?? undefined)
+								? (
+										params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop
+									).map((str) =>
+										decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
+									)
+								: undefined
+					},
 
 					tool_ids: selectedToolIds.length > 0 ? selectedToolIds : undefined,
 					files: files.length > 0 ? files : undefined,
@@ -1900,20 +1503,12 @@
 				.at(0);
 
 			if (model) {
-				if (model?.owned_by === 'openai') {
-					await sendPromptOpenAI(
-						model,
-						history.messages[responseMessage.parentId].content,
-						responseMessage.id,
-						_chatId
-					);
-				} else
-					await sendPromptOllama(
-						model,
-						history.messages[responseMessage.parentId].content,
-						responseMessage.id,
-						_chatId
-					);
+				await sendPromptOpenAI(
+					model,
+					history.messages[responseMessage.parentId].content,
+					responseMessage.id,
+					_chatId
+				);
 			}
 		}
 	};

+ 2 - 1
src/lib/components/layout/Sidebar/ChatItem.svelte

@@ -110,9 +110,10 @@
 		if (res) {
 			tags.set(await getAllTags(localStorage.token));
 			if ($chatId === id) {
+				await goto('/');
+
 				await chatId.set('');
 				await tick();
-				goto('/');
 			}
 
 			dispatch('change');