
Merge branch 'dev' into buroa/hybrid-search

Steven Kreitzer 1 year ago
parent
commit
adb009f388

+ 14 - 3
CHANGELOG.md

@@ -5,13 +5,24 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## [0.1.121] - 2024-04-22
-
-### Added
+## [0.1.122] - 2024-04-24
 
 - **🛠️ Improved Embedding Model Support**: You can now use any embedding model `sentence_transformers` supports.
 - **🌟 Enhanced RAG Pipeline**: Added `BM25` hybrid searching with reranking model support using `sentence_transformers`.
 
+## [0.1.121] - 2024-04-24
+
+### Fixed
+
+- **🔧 Translation Issues**: Addressed various translation discrepancies.
+- **🔒 LiteLLM Security Fix**: Updated LiteLLM version to resolve a security vulnerability.
+- **🖥️ HTML Tag Display**: Rectified the issue where the '< br >' tag wasn't displaying correctly.
+- **🔗 WebSocket Connection**: Resolved the failure of WebSocket connection under HTTPS security for ComfyUI server.
+- **📜 FileReader Optimization**: Implemented FileReader initialization per image in multi-file drag & drop to ensure reusability.
+- **🏷️ Tag Display**: Corrected tag display inconsistencies.
+- **📦 Archived Chat Styling**: Fixed styling issues in archived chat.
+- **🔖 Safari Copy Button Bug**: Addressed the bug where the copy button failed to copy links in Safari.
+
 ## [0.1.120] - 2024-04-20
 
 ### Added
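
> Editor's note: the changelog entry above mentions hybrid search combining `BM25` lexical retrieval with `sentence_transformers` embeddings and a reranking model. A minimal, hypothetical sketch of that general pattern follows; it assumes `rank-bm25` and `sentence-transformers` are installed, and the model names and fusion heuristic are illustrative, not the project's actual pipeline.

```python
# Hypothetical sketch: BM25 + embedding hybrid retrieval with cross-encoder reranking.
from rank_bm25 import BM25Okapi
from sentence_transformers import SentenceTransformer, CrossEncoder, util

docs = [
    "hybrid search mixes lexical and semantic signals",
    "bm25 scores exact term overlap",
    "embeddings capture meaning beyond keywords",
]

# Lexical leg: BM25 over whitespace-tokenized documents.
bm25 = BM25Okapi([d.split() for d in docs])

# Semantic leg: any embedding model sentence_transformers supports.
embedder = SentenceTransformer("all-MiniLM-L6-v2")
doc_emb = embedder.encode(docs, convert_to_tensor=True)

query = "keyword plus semantic retrieval"
bm25_scores = bm25.get_scores(query.split())
sem_scores = util.cos_sim(embedder.encode(query, convert_to_tensor=True), doc_emb)[0]

# Union of top candidates from each leg (scores live on different scales,
# so candidates are merged rather than summed).
top_bm25 = sorted(range(len(docs)), key=lambda i: bm25_scores[i], reverse=True)[:2]
top_sem = sorted(range(len(docs)), key=lambda i: float(sem_scores[i]), reverse=True)[:2]
candidates = list(dict.fromkeys(top_bm25 + top_sem))

# Rerank the merged candidates with a cross-encoder.
reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
scores = reranker.predict([(query, docs[i]) for i in candidates])
reranked = [i for _, i in sorted(zip(scores, candidates), reverse=True)]
print([docs[i] for i in reranked])
```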

+ 19 - 13
backend/apps/images/main.py

@@ -35,8 +35,8 @@ from config import (
     ENABLE_IMAGE_GENERATION,
     AUTOMATIC1111_BASE_URL,
     COMFYUI_BASE_URL,
-    OPENAI_API_BASE_URL,
-    OPENAI_API_KEY,
+    IMAGES_OPENAI_API_BASE_URL,
+    IMAGES_OPENAI_API_KEY,
 )
 
 
@@ -58,8 +58,8 @@ app.add_middleware(
 app.state.ENGINE = ""
 app.state.ENABLED = ENABLE_IMAGE_GENERATION
 
-app.state.OPENAI_API_BASE_URL = OPENAI_API_BASE_URL
-app.state.OPENAI_API_KEY = OPENAI_API_KEY
+app.state.OPENAI_API_BASE_URL = IMAGES_OPENAI_API_BASE_URL
+app.state.OPENAI_API_KEY = IMAGES_OPENAI_API_KEY
 
 app.state.MODEL = ""
 
@@ -135,27 +135,33 @@ async def update_engine_url(
     }
 
 
-class OpenAIKeyUpdateForm(BaseModel):
+class OpenAIConfigUpdateForm(BaseModel):
+    url: str
     key: str
 
 
-@app.get("/key")
-async def get_openai_key(user=Depends(get_admin_user)):
-    return {"OPENAI_API_KEY": app.state.OPENAI_API_KEY}
+@app.get("/openai/config")
+async def get_openai_config(user=Depends(get_admin_user)):
+    return {
+        "OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
+        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
+    }
 
 
-@app.post("/key/update")
-async def update_openai_key(
-    form_data: OpenAIKeyUpdateForm, user=Depends(get_admin_user)
+@app.post("/openai/config/update")
+async def update_openai_config(
+    form_data: OpenAIConfigUpdateForm, user=Depends(get_admin_user)
 ):
-
     if form_data.key == "":
         raise HTTPException(status_code=400, detail=ERROR_MESSAGES.API_KEY_NOT_FOUND)
 
+    app.state.OPENAI_API_BASE_URL = form_data.url
     app.state.OPENAI_API_KEY = form_data.key
+
     return {
-        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
         "status": True,
+        "OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
+        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
     }
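
> Editor's note: the hunk above replaces the key-only `/key` routes with combined `/openai/config` routes that carry both the base URL and the key. A hedged client-side sketch of how these endpoints might be exercised with `requests`; the host, route prefix, and token are placeholders, not values from this commit.

```python
# Hypothetical client calls against the reworked image-generation config routes.
import requests

BASE = "http://localhost:8080/images/api/v1"          # placeholder host/prefix
headers = {"Authorization": "Bearer <admin-token>"}   # admin JWT assumed

# GET now returns both the base URL and the key.
cfg = requests.get(f"{BASE}/openai/config", headers=headers).json()
print(cfg["OPENAI_API_BASE_URL"], cfg["OPENAI_API_KEY"])

# POST expects both fields; an empty key is rejected with a 400.
res = requests.post(
    f"{BASE}/openai/config/update",
    headers=headers,
    json={"url": "https://api.openai.com/v1", "key": "sk-..."},
)
print(res.json())  # {"status": True, "OPENAI_API_BASE_URL": ..., "OPENAI_API_KEY": ...}
```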
 
 

+ 23 - 8
backend/apps/litellm/main.py

@@ -1,3 +1,5 @@
+import sys
+
 from fastapi import FastAPI, Depends, HTTPException
 from fastapi.routing import APIRoute
 from fastapi.middleware.cors import CORSMiddleware
@@ -23,7 +25,13 @@ log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["LITELLM"])
 
 
-from config import MODEL_FILTER_ENABLED, MODEL_FILTER_LIST, DATA_DIR
+from config import (
+    MODEL_FILTER_ENABLED,
+    MODEL_FILTER_LIST,
+    DATA_DIR,
+    LITELLM_PROXY_PORT,
+    LITELLM_PROXY_HOST,
+)
 
 from litellm.utils import get_llm_provider
 
@@ -64,7 +72,7 @@ async def run_background_process(command):
         log.info(f"Executing command: {command}")
         # Execute the command and create a subprocess
         process = await asyncio.create_subprocess_exec(
-            *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
+            *command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
         )
         background_process = process
         log.info("Subprocess started successfully.")
@@ -90,9 +98,17 @@ async def run_background_process(command):
 async def start_litellm_background():
     log.info("start_litellm_background")
     # Command to run in the background
-    command = (
-        "litellm --port 14365 --telemetry False --config ./data/litellm/config.yaml"
-    )
+    command = [
+        "litellm",
+        "--port",
+        str(LITELLM_PROXY_PORT),
+        "--host",
+        LITELLM_PROXY_HOST,
+        "--telemetry",
+        "False",
+        "--config",
+        LITELLM_CONFIG_DIR,
+    ]
 
     await run_background_process(command)
 
@@ -109,7 +125,6 @@ async def shutdown_litellm_background():
 
 @app.on_event("startup")
 async def startup_event():
-
     log.info("startup_event")
     # TODO: Check config.yaml file and create one
     asyncio.create_task(start_litellm_background())
@@ -186,7 +201,7 @@ async def get_models(user=Depends(get_current_user)):
     while not background_process:
         await asyncio.sleep(0.1)
 
-    url = "http://localhost:14365/v1"
+    url = f"http://localhost:{LITELLM_PROXY_PORT}/v1"
     r = None
     try:
         r = requests.request(method="GET", url=f"{url}/models")
@@ -289,7 +304,7 @@ async def delete_model_from_config(
 async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
     body = await request.body()
 
-    url = "http://localhost:14365"
+    url = f"http://localhost:{LITELLM_PROXY_PORT}"
 
     target_url = f"{url}/{path}"
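
> Editor's note: the hunk above switches `run_background_process` from splitting a command string to passing an explicit argv list, which keeps arguments containing spaces (such as a config path) intact. A minimal standalone sketch of the pattern, independent of this codebase:

```python
# Minimal sketch: argv-list subprocess launch, mirroring the change above.
import asyncio
import subprocess

async def run(command: list[str]) -> None:
    # Each argument is passed through unmodified; no shell, no whitespace splitting.
    process = await asyncio.create_subprocess_exec(
        *command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, stderr = await process.communicate()
    print(process.returncode, stdout.decode(), stderr.decode())

# A path with a space would be mangled by the old `"cmd --config ./my dir/cfg".split()`.
asyncio.run(run(["echo", "--config", "./my dir/config.yaml"]))
```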
 

+ 15 - 0
backend/config.py

@@ -499,9 +499,24 @@ AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "")
 COMFYUI_BASE_URL = os.getenv("COMFYUI_BASE_URL", "")
 
 
+IMAGES_OPENAI_API_BASE_URL = os.getenv(
+    "IMAGES_OPENAI_API_BASE_URL", OPENAI_API_BASE_URL
+)
+IMAGES_OPENAI_API_KEY = os.getenv("IMAGES_OPENAI_API_KEY", OPENAI_API_KEY)
+
+
 ####################################
 # Audio
 ####################################
 
 AUDIO_OPENAI_API_BASE_URL = os.getenv("AUDIO_OPENAI_API_BASE_URL", OPENAI_API_BASE_URL)
 AUDIO_OPENAI_API_KEY = os.getenv("AUDIO_OPENAI_API_KEY", OPENAI_API_KEY)
+
+####################################
+# LiteLLM
+####################################
+
+LITELLM_PROXY_PORT = int(os.getenv("LITELLM_PROXY_PORT", "14365"))
+if LITELLM_PROXY_PORT < 0 or LITELLM_PROXY_PORT > 65535:
+    raise ValueError("Invalid port number for LITELLM_PROXY_PORT")
+LITELLM_PROXY_HOST = os.getenv("LITELLM_PROXY_HOST", "127.0.0.1")
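
> Editor's note: the config additions above follow two patterns worth naming: service-scoped variables fall back to the global `OPENAI_API_*` values when unset, and the proxy port is parsed and range-checked eagerly so a bad value fails at startup rather than at bind time. A brief sketch of the same pattern in isolation:

```python
# Hedged sketch of the fallback-and-validate pattern used in config.py.
import os

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")  # global default

# Service-specific value falls back to the global one when unset.
IMAGES_OPENAI_API_KEY = os.getenv("IMAGES_OPENAI_API_KEY", OPENAI_API_KEY)

# Parse and validate at import time: int() raises on garbage,
# and the range check rejects out-of-bounds ports immediately.
LITELLM_PROXY_PORT = int(os.getenv("LITELLM_PROXY_PORT", "14365"))
if not 0 <= LITELLM_PROXY_PORT <= 65535:
    raise ValueError("Invalid port number for LITELLM_PROXY_PORT")
```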

+ 2 - 2
package-lock.json

@@ -1,12 +1,12 @@
 {
 	"name": "open-webui",
-	"version": "0.1.120",
+	"version": "0.1.121",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.1.120",
+			"version": "0.1.121",
 			"dependencies": {
 				"@sveltejs/adapter-node": "^1.3.1",
 				"async": "^3.2.5",

+ 1 - 1
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.1.120",
+	"version": "0.1.121",
 	"private": true,
 	"scripts": {
 		"dev": "vite dev --host",

+ 7 - 6
src/lib/apis/images/index.ts

@@ -72,10 +72,10 @@ export const updateImageGenerationConfig = async (
 	return res;
 };
 
-export const getOpenAIKey = async (token: string = '') => {
+export const getOpenAIConfig = async (token: string = '') => {
 	let error = null;
 
-	const res = await fetch(`${IMAGES_API_BASE_URL}/key`, {
+	const res = await fetch(`${IMAGES_API_BASE_URL}/openai/config`, {
 		method: 'GET',
 		headers: {
 			Accept: 'application/json',
@@ -101,13 +101,13 @@ export const getOpenAIKey = async (token: string = '') => {
 		throw error;
 	}
 
-	return res.OPENAI_API_KEY;
+	return res;
 };
 
-export const updateOpenAIKey = async (token: string = '', key: string) => {
+export const updateOpenAIConfig = async (token: string = '', url: string, key: string) => {
 	let error = null;
 
-	const res = await fetch(`${IMAGES_API_BASE_URL}/key/update`, {
+	const res = await fetch(`${IMAGES_API_BASE_URL}/openai/config/update`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
@@ -115,6 +115,7 @@ export const updateOpenAIKey = async (token: string = '', key: string) => {
 			...(token && { authorization: `Bearer ${token}` })
 		},
 		body: JSON.stringify({
+			url: url,
 			key: key
 		})
 	})
@@ -136,7 +137,7 @@ export const updateOpenAIKey = async (token: string = '', key: string) => {
 		throw error;
 	}
 
-	return res.OPENAI_API_KEY;
+	return res;
 };
 
 export const getImageGenerationEngineUrls = async (token: string = '') => {

+ 9 - 7
src/lib/components/chat/Settings/Audio.svelte

@@ -75,14 +75,16 @@
 	};
 
 	const updateConfigHandler = async () => {
-		const res = await updateAudioConfig(localStorage.token, {
-			url: OpenAIUrl,
-			key: OpenAIKey
-		});
+		if (TTSEngine === 'openai') {
+			const res = await updateAudioConfig(localStorage.token, {
+				url: OpenAIUrl,
+				key: OpenAIKey
+			});
 
-		if (res) {
-			OpenAIUrl = res.OPENAI_API_BASE_URL;
-			OpenAIKey = res.OPENAI_API_KEY;
+			if (res) {
+				OpenAIUrl = res.OPENAI_API_BASE_URL;
+				OpenAIKey = res.OPENAI_API_KEY;
+			}
 		}
 	};
 

+ 54 - 21
src/lib/components/chat/Settings/Images.svelte

@@ -15,8 +15,8 @@
 		updateImageSize,
 		getImageSteps,
 		updateImageSteps,
-		getOpenAIKey,
-		updateOpenAIKey
+		getOpenAIConfig,
+		updateOpenAIConfig
 	} from '$lib/apis/images';
 	import { getBackendConfig } from '$lib/apis';
 	const dispatch = createEventDispatcher();
@@ -33,6 +33,7 @@
 	let AUTOMATIC1111_BASE_URL = '';
 	let COMFYUI_BASE_URL = '';
 
+	let OPENAI_API_BASE_URL = '';
 	let OPENAI_API_KEY = '';
 
 	let selectedModel = '';
@@ -131,7 +132,10 @@
 			AUTOMATIC1111_BASE_URL = URLS.AUTOMATIC1111_BASE_URL;
 			COMFYUI_BASE_URL = URLS.COMFYUI_BASE_URL;
 
-			OPENAI_API_KEY = await getOpenAIKey(localStorage.token);
+			const config = await getOpenAIConfig(localStorage.token);
+
+			OPENAI_API_KEY = config.OPENAI_API_KEY;
+			OPENAI_API_BASE_URL = config.OPENAI_API_BASE_URL;
 
 			imageSize = await getImageSize(localStorage.token);
 			steps = await getImageSteps(localStorage.token);
@@ -149,7 +153,7 @@
 		loading = true;
 
 		if (imageGenerationEngine === 'openai') {
-			await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
+			await updateOpenAIConfig(localStorage.token, OPENAI_API_BASE_URL, OPENAI_API_KEY);
 		}
 
 		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
@@ -300,13 +304,22 @@
 				</button>
 			</div>
 		{:else if imageGenerationEngine === 'openai'}
-			<div class=" mb-2.5 text-sm font-medium">{$i18n.t('OpenAI API Key')}</div>
-			<div class="flex w-full">
-				<div class="flex-1 mr-2">
+			<div>
+				<div class=" mb-1.5 text-sm font-medium">{$i18n.t('OpenAI API Config')}</div>
+
+				<div class="flex gap-2 mb-1">
+					<input
+						class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+						placeholder={$i18n.t('API Base URL')}
+						bind:value={OPENAI_API_BASE_URL}
+						required
+					/>
+
 					<input
 						class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-						placeholder={$i18n.t('Enter API Key')}
+						placeholder={$i18n.t('API Key')}
 						bind:value={OPENAI_API_KEY}
+						required
 					/>
 				</div>
 			</div>
@@ -319,19 +332,39 @@
 				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Default Model')}</div>
 				<div class="flex w-full">
 					<div class="flex-1 mr-2">
-						<select
-							class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
-							bind:value={selectedModel}
-							placeholder={$i18n.t('Select a model')}
-							required
-						>
-							{#if !selectedModel}
-								<option value="" disabled selected>{$i18n.t('Select a model')}</option>
-							{/if}
-							{#each models ?? [] as model}
-								<option value={model.id} class="bg-gray-100 dark:bg-gray-700">{model.name}</option>
-							{/each}
-						</select>
+						{#if imageGenerationEngine === 'openai' && !OPENAI_API_BASE_URL.includes('https://api.openai.com')}
+							<div class="flex w-full">
+								<div class="flex-1">
+									<input
+										list="model-list"
+										class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+										bind:value={selectedModel}
+										placeholder="Select a model"
+									/>
+
+									<datalist id="model-list">
+										{#each models ?? [] as model}
+											<option value={model.id}>{model.name}</option>
+										{/each}
+									</datalist>
+								</div>
+							</div>
+						{:else}
+							<select
+								class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
+								bind:value={selectedModel}
+								placeholder={$i18n.t('Select a model')}
+								required
+							>
+								{#if !selectedModel}
+									<option value="" disabled selected>{$i18n.t('Select a model')}</option>
+								{/if}
+								{#each models ?? [] as model}
+									<option value={model.id} class="bg-gray-100 dark:bg-gray-700">{model.name}</option
+									>
+								{/each}
+							</select>
+						{/if}
 					</div>
 				</div>
 			</div>

+ 119 - 129
src/lib/components/chat/Settings/Models.svelte

@@ -13,7 +13,7 @@
 		uploadModel
 	} from '$lib/apis/ollama';
 	import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
-	import { WEBUI_NAME, models, user } from '$lib/stores';
+	import { WEBUI_NAME, models, MODEL_DOWNLOAD_POOL, user } from '$lib/stores';
 	import { splitStream } from '$lib/utils';
 	import { onMount, getContext } from 'svelte';
 	import { addLiteLLMModel, deleteLiteLLMModel, getLiteLLMModelInfo } from '$lib/apis/litellm';
@@ -50,12 +50,6 @@
 	let showExperimentalOllama = false;
 	let ollamaVersion = '';
 	const MAX_PARALLEL_DOWNLOADS = 3;
-	const modelDownloadQueue = queue(
-		(task: { modelName: string }, cb) =>
-			pullModelHandlerProcessor({ modelName: task.modelName, callback: cb }),
-		MAX_PARALLEL_DOWNLOADS
-	);
-	let modelDownloadStatus: Record<string, any> = {};
 
 	let modelTransferring = false;
 	let modelTag = '';
@@ -140,7 +134,8 @@
 
 	const pullModelHandler = async () => {
 		const sanitizedModelTag = modelTag.trim().replace(/^ollama\s+(run|pull)\s+/, '');
-		if (modelDownloadStatus[sanitizedModelTag]) {
+		console.log($MODEL_DOWNLOAD_POOL);
+		if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag]) {
 			toast.error(
 				$i18n.t(`Model '{{modelTag}}' is already in queue for downloading.`, {
 					modelTag: sanitizedModelTag
@@ -148,40 +143,117 @@
 			);
 			return;
 		}
-		if (Object.keys(modelDownloadStatus).length === 3) {
+		if (Object.keys($MODEL_DOWNLOAD_POOL).length === MAX_PARALLEL_DOWNLOADS) {
 			toast.error(
 				$i18n.t('Maximum of 3 models can be downloaded simultaneously. Please try again later.')
 			);
 			return;
 		}
 
-		modelTransferring = true;
+		const res = await pullModel(localStorage.token, sanitizedModelTag, '0').catch((error) => {
+			toast.error(error);
+			return null;
+		});
+
+		if (res) {
+			const reader = res.body
+				.pipeThrough(new TextDecoderStream())
+				.pipeThrough(splitStream('\n'))
+				.getReader();
 
-		modelDownloadQueue.push(
-			{ modelName: sanitizedModelTag },
-			async (data: { modelName: string; success: boolean; error?: Error }) => {
-				const { modelName } = data;
-				// Remove the downloaded model
-				delete modelDownloadStatus[modelName];
+			while (true) {
+				try {
+					const { value, done } = await reader.read();
+					if (done) break;
 
-				modelDownloadStatus = { ...modelDownloadStatus };
+					let lines = value.split('\n');
+
+					for (const line of lines) {
+						if (line !== '') {
+							let data = JSON.parse(line);
+							console.log(data);
+							if (data.error) {
+								throw data.error;
+							}
+							if (data.detail) {
+								throw data.detail;
+							}
+
+							if (data.id) {
+								MODEL_DOWNLOAD_POOL.set({
+									...$MODEL_DOWNLOAD_POOL,
+									[sanitizedModelTag]: {
+										...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+										requestId: data.id,
+										reader,
+										done: false
+									}
+								});
+								console.log(data);
+							}
+
+							if (data.status) {
+								if (data.digest) {
+									let downloadProgress = 0;
+									if (data.completed) {
+										downloadProgress = Math.round((data.completed / data.total) * 1000) / 10;
+									} else {
+										downloadProgress = 100;
+									}
 
-				if (!data.success) {
-					toast.error(data.error);
-				} else {
-					toast.success(
-						$i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, { modelName })
-					);
+									MODEL_DOWNLOAD_POOL.set({
+										...$MODEL_DOWNLOAD_POOL,
+										[sanitizedModelTag]: {
+											...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+											pullProgress: downloadProgress,
+											digest: data.digest
+										}
+									});
+								} else {
+									toast.success(data.status);
 
-					const notification = new Notification($WEBUI_NAME, {
-						body: $i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, { modelName }),
-						icon: `${WEBUI_BASE_URL}/static/favicon.png`
-					});
+									MODEL_DOWNLOAD_POOL.set({
+										...$MODEL_DOWNLOAD_POOL,
+										[sanitizedModelTag]: {
+											...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+											done: data.status === 'success'
+										}
+									});
+								}
+							}
+						}
+					}
+				} catch (error) {
+					console.log(error);
+					if (typeof error !== 'string') {
+						error = error.message;
+					}
 
-					models.set(await getModels());
+					toast.error(error);
+					// opts.callback({ success: false, error, modelName: opts.modelName });
 				}
 			}
-		);
+
+			console.log($MODEL_DOWNLOAD_POOL[sanitizedModelTag]);
+
+			if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag].done) {
+				toast.success(
+					$i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, {
+						modelName: sanitizedModelTag
+					})
+				);
+
+				models.set(await getModels(localStorage.token));
+			} else {
+				toast.error('Download canceled');
+			}
+
+			delete $MODEL_DOWNLOAD_POOL[sanitizedModelTag];
+
+			MODEL_DOWNLOAD_POOL.set({
+				...$MODEL_DOWNLOAD_POOL
+			});
+		}
 
 		modelTag = '';
 		modelTransferring = false;
@@ -352,88 +424,18 @@
 		models.set(await getModels());
 	};
 
-	const pullModelHandlerProcessor = async (opts: { modelName: string; callback: Function }) => {
-		const res = await pullModel(localStorage.token, opts.modelName, selectedOllamaUrlIdx).catch(
-			(error) => {
-				opts.callback({ success: false, error, modelName: opts.modelName });
-				return null;
-			}
-		);
-
-		if (res) {
-			const reader = res.body
-				.pipeThrough(new TextDecoderStream())
-				.pipeThrough(splitStream('\n'))
-				.getReader();
-
-			while (true) {
-				try {
-					const { value, done } = await reader.read();
-					if (done) break;
-
-					let lines = value.split('\n');
-
-					for (const line of lines) {
-						if (line !== '') {
-							let data = JSON.parse(line);
-							console.log(data);
-							if (data.error) {
-								throw data.error;
-							}
-							if (data.detail) {
-								throw data.detail;
-							}
-
-							if (data.id) {
-								modelDownloadStatus[opts.modelName] = {
-									...modelDownloadStatus[opts.modelName],
-									requestId: data.id,
-									reader,
-									done: false
-								};
-								console.log(data);
-							}
-
-							if (data.status) {
-								if (data.digest) {
-									let downloadProgress = 0;
-									if (data.completed) {
-										downloadProgress = Math.round((data.completed / data.total) * 1000) / 10;
-									} else {
-										downloadProgress = 100;
-									}
-									modelDownloadStatus[opts.modelName] = {
-										...modelDownloadStatus[opts.modelName],
-										pullProgress: downloadProgress,
-										digest: data.digest
-									};
-								} else {
-									toast.success(data.status);
-
-									modelDownloadStatus[opts.modelName] = {
-										...modelDownloadStatus[opts.modelName],
-										done: data.status === 'success'
-									};
-								}
-							}
-						}
-					}
-				} catch (error) {
-					console.log(error);
-					if (typeof error !== 'string') {
-						error = error.message;
-					}
-					opts.callback({ success: false, error, modelName: opts.modelName });
-				}
-			}
-
-			console.log(modelDownloadStatus[opts.modelName]);
+	const cancelModelPullHandler = async (model: string) => {
+		const { reader, requestId } = $MODEL_DOWNLOAD_POOL[model];
+		if (reader) {
+			await reader.cancel();
 
-			if (modelDownloadStatus[opts.modelName].done) {
-				opts.callback({ success: true, modelName: opts.modelName });
-			} else {
-				opts.callback({ success: false, error: 'Download canceled', modelName: opts.modelName });
-			}
+			await cancelOllamaRequest(localStorage.token, requestId);
+			delete $MODEL_DOWNLOAD_POOL[model];
+			MODEL_DOWNLOAD_POOL.set({
+				...$MODEL_DOWNLOAD_POOL
+			});
+			await deleteModel(localStorage.token, model);
+			toast.success(`${model} download has been canceled`);
 		}
 	};
 
@@ -503,18 +505,6 @@
 		ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
 		liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
 	});
-
-	const cancelModelPullHandler = async (model: string) => {
-		const { reader, requestId } = modelDownloadStatus[model];
-		if (reader) {
-			await reader.cancel();
-
-			await cancelOllamaRequest(localStorage.token, requestId);
-			delete modelDownloadStatus[model];
-			await deleteModel(localStorage.token, model);
-			toast.success(`${model} download has been canceled`);
-		}
-	};
 </script>
 
 <div class="flex flex-col h-full justify-between text-sm">
@@ -643,9 +633,9 @@
 							>
 						</div>
 
-						{#if Object.keys(modelDownloadStatus).length > 0}
-							{#each Object.keys(modelDownloadStatus) as model}
-								{#if 'pullProgress' in modelDownloadStatus[model]}
+						{#if Object.keys($MODEL_DOWNLOAD_POOL).length > 0}
+							{#each Object.keys($MODEL_DOWNLOAD_POOL) as model}
+								{#if 'pullProgress' in $MODEL_DOWNLOAD_POOL[model]}
 									<div class="flex flex-col">
 										<div class="font-medium mb-1">{model}</div>
 										<div class="">
@@ -655,10 +645,10 @@
 														class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
 														style="width: {Math.max(
 															15,
-															modelDownloadStatus[model].pullProgress ?? 0
+															$MODEL_DOWNLOAD_POOL[model].pullProgress ?? 0
 														)}%"
 													>
-														{modelDownloadStatus[model].pullProgress ?? 0}%
+														{$MODEL_DOWNLOAD_POOL[model].pullProgress ?? 0}%
 													</div>
 												</div>
 
@@ -689,9 +679,9 @@
 													</button>
 												</Tooltip>
 											</div>
-											{#if 'digest' in modelDownloadStatus[model]}
+											{#if 'digest' in $MODEL_DOWNLOAD_POOL[model]}
 												<div class="mt-1 text-xs dark:text-gray-500" style="font-size: 0.5rem;">
-													{modelDownloadStatus[model].digest}
+													{$MODEL_DOWNLOAD_POOL[model].digest}
 												</div>
 											{/if}
 										</div>

+ 1 - 1
src/lib/i18n/locales/nl-NL/translation.json

@@ -62,7 +62,7 @@
 	"Click here to check other modelfiles.": "Klik hier om andere modelfiles te controleren.",
 	"Click here to select": "Klik hier om te selecteren",
 	"Click here to select documents.": "Klik hier om documenten te selecteren",
-	"click here.": "click here.",
+	"click here.": "klik hier.",
 	"Click on the user role button to change a user's role.": "Klik op de gebruikersrol knop om de rol van een gebruiker te wijzigen.",
 	"Close": "Sluiten",
 	"Collection": "Verzameling",

+ 0 - 0
src/lib/i18n/locales/pl-pl/translation.json → src/lib/i18n/locales/pl-PL/translation.json


+ 13 - 13
src/lib/i18n/locales/ru-RU/translation.json

@@ -2,39 +2,39 @@
 	"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' или '-1' для не истечение.",
 	"(Beta)": "(бета)",
 	"(e.g. `sh webui.sh --api`)": "(например: `sh webui.sh --api`)",
-	"(latest)": "(новый)",
-	"{{modelName}} is thinking...": "{{modelName}} это думает...",
+	"(latest)": "(последний)",
+	"{{modelName}} is thinking...": "{{modelName}} думает...",
 	"{{webUIName}} Backend Required": "{{webUIName}} бэкенд требуемый",
-	"a user": "юзер",
-	"About": "Относительно",
+	"a user": "пользователь",
+	"About": "Об",
 	"Account": "Аккаунт",
 	"Action": "Действие",
 	"Add a model": "Добавьте модель",
-	"Add a model tag name": "Добавьте тэг модели имя",
-	"Add a short description about what this modelfile does": "Добавьте краткое описание, что делает этот моделифайл",
-	"Add a short title for this prompt": "Добавьте краткое название для этого взаимодействия",
+	"Add a model tag name": "Добавьте имя тэга модели",
+	"Add a short description about what this modelfile does": "Добавьте краткое описание, что делает этот моделфайл",
+	"Add a short title for this prompt": "Добавьте краткий заголовок для этого ввода",
 	"Add a tag": "Добавьте тэг",
 	"Add Docs": "Добавьте документы",
 	"Add Files": "Добавьте файлы",
-	"Add message": "Добавьте message",
+	"Add message": "Добавьте сообщение",
 	"add tags": "Добавьте тэгы",
-	"Adjusting these settings will apply changes universally to all users.": "Регулирующий этих настроек приведет к изменениям для все юзеры.",
+	"Adjusting these settings will apply changes universally to all users.": "Регулирующий этих настроек приведет к изменениям для все пользователей.",
 	"admin": "админ",
 	"Admin Panel": "Панель админ",
 	"Admin Settings": "Настройки админ",
 	"Advanced Parameters": "Расширенные Параметры",
 	"all": "всё",
-	"All Users": "Всё юзеры",
-	"Allow": "Дозволять",
+	"All Users": "Все пользователи",
+	"Allow": "Разрешить",
 	"Allow Chat Deletion": "Дозволять удаление чат",
 	"alphanumeric characters and hyphens": "буквенно цифровые символы и дефисы",
-	"Already have an account?": "у вас есть аккаунт уже?",
+	"Already have an account?": "у вас уже есть аккаунт?",
 	"an assistant": "ассистент",
 	"and": "и",
 	"API Base URL": "Базовый адрес API",
 	"API Key": "Ключ API",
 	"API RPM": "API RPM",
-	"are allowed - Activate this command by typing": "разрешено - активируйте эту команду набором",
+	"are allowed - Activate this command by typing": "разрешено - активируйте эту команду вводом",
 	"Are you sure?": "Вы уверены?",
 	"Audio": "Аудио",
 	"Auto-playback response": "Автоматическое воспроизведение ответа",

+ 1 - 0
static/manifest.json

@@ -0,0 +1 @@
+{}