i18n(common): add i18n translation

orenzhang 4 months ago
commit 92fb1109b6
56 changed files with 315 additions and 16 deletions
  1. +1 -1  src/lib/components/AddConnectionModal.svelte
  2. +4 -4  src/lib/components/admin/Settings/Documents.svelte
  3. +2 -2  src/lib/components/admin/Settings/WebSearch.svelte
  4. +9 -4  src/lib/components/channel/Messages.svelte
  5. +1 -1  src/lib/components/chat/Messages/CodeBlock.svelte
  6. +2 -2  src/lib/components/common/FileItemModal.svelte
  7. +2 -2  src/lib/components/workspace/common/AccessControl.svelte
  8. +6 -0  src/lib/i18n/locales/ar-BH/translation.json
  9. +6 -0  src/lib/i18n/locales/bg-BG/translation.json
  10. +6 -0  src/lib/i18n/locales/bn-BD/translation.json
  11. +6 -0  src/lib/i18n/locales/ca-ES/translation.json
  12. +6 -0  src/lib/i18n/locales/ceb-PH/translation.json
  13. +6 -0  src/lib/i18n/locales/cs-CZ/translation.json
  14. +6 -0  src/lib/i18n/locales/da-DK/translation.json
  15. +6 -0  src/lib/i18n/locales/de-DE/translation.json
  16. +6 -0  src/lib/i18n/locales/dg-DG/translation.json
  17. +6 -0  src/lib/i18n/locales/el-GR/translation.json
  18. +6 -0  src/lib/i18n/locales/en-GB/translation.json
  19. +6 -0  src/lib/i18n/locales/en-US/translation.json
  20. +6 -0  src/lib/i18n/locales/es-ES/translation.json
  21. +6 -0  src/lib/i18n/locales/eu-ES/translation.json
  22. +6 -0  src/lib/i18n/locales/fa-IR/translation.json
  23. +6 -0  src/lib/i18n/locales/fi-FI/translation.json
  24. +6 -0  src/lib/i18n/locales/fr-CA/translation.json
  25. +6 -0  src/lib/i18n/locales/fr-FR/translation.json
  26. +6 -0  src/lib/i18n/locales/he-IL/translation.json
  27. +6 -0  src/lib/i18n/locales/hi-IN/translation.json
  28. +6 -0  src/lib/i18n/locales/hr-HR/translation.json
  29. +6 -0  src/lib/i18n/locales/hu-HU/translation.json
  30. +6 -0  src/lib/i18n/locales/id-ID/translation.json
  31. +6 -0  src/lib/i18n/locales/ie-GA/translation.json
  32. +6 -0  src/lib/i18n/locales/it-IT/translation.json
  33. +6 -0  src/lib/i18n/locales/ja-JP/translation.json
  34. +6 -0  src/lib/i18n/locales/ka-GE/translation.json
  35. +6 -0  src/lib/i18n/locales/ko-KR/translation.json
  36. +6 -0  src/lib/i18n/locales/lt-LT/translation.json
  37. +6 -0  src/lib/i18n/locales/ms-MY/translation.json
  38. +6 -0  src/lib/i18n/locales/nb-NO/translation.json
  39. +6 -0  src/lib/i18n/locales/nl-NL/translation.json
  40. +6 -0  src/lib/i18n/locales/pa-IN/translation.json
  41. +6 -0  src/lib/i18n/locales/pl-PL/translation.json
  42. +6 -0  src/lib/i18n/locales/pt-BR/translation.json
  43. +6 -0  src/lib/i18n/locales/pt-PT/translation.json
  44. +6 -0  src/lib/i18n/locales/ro-RO/translation.json
  45. +6 -0  src/lib/i18n/locales/ru-RU/translation.json
  46. +6 -0  src/lib/i18n/locales/sk-SK/translation.json
  47. +6 -0  src/lib/i18n/locales/sr-RS/translation.json
  48. +6 -0  src/lib/i18n/locales/sv-SE/translation.json
  49. +6 -0  src/lib/i18n/locales/th-TH/translation.json
  50. +6 -0  src/lib/i18n/locales/tk-TW/translation.json
  51. +6 -0  src/lib/i18n/locales/tr-TR/translation.json
  52. +6 -0  src/lib/i18n/locales/uk-UA/translation.json
  53. +6 -0  src/lib/i18n/locales/ur-PK/translation.json
  54. +6 -0  src/lib/i18n/locales/vi-VN/translation.json
  55. +6 -0  src/lib/i18n/locales/zh-CN/translation.json
  56. +6 -0  src/lib/i18n/locales/zh-TW/translation.json
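
The pattern applied throughout this commit: hardcoded UI strings in the Svelte components are wrapped in $i18n.t(...), dynamic values are passed as named {{placeholder}} interpolation parameters, and each new source string is added as a key with an empty value to every locale's translation.json for translators to fill in later. A minimal sketch of the component-side usage follows; the getContext('i18n') setup is an assumption about how the store is obtained and is not part of this diff.

	<script>
		import { getContext } from 'svelte';

		// i18next-backed store, assumed to be provided by the app shell via setContext('i18n', ...)
		const i18n = getContext('i18n');
	</script>

	<!-- plain key lookup; 'Verify Connection' must exist as a key in each locale's translation.json -->
	<button>{$i18n.t('Verify Connection')}</button>

	<!-- lookup with interpolation; {{createdAt}} and {{channelName}} must match the key text exactly -->
	<div>
		{$i18n.t(
			'This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.',
			{ createdAt: 'January 1, 2025', channelName: 'general' }
		)}
	</div>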

+ 1 - 1
src/lib/components/AddConnectionModal.svelte

@@ -179,7 +179,7 @@
 								</div>
 							</div>
 
-							<Tooltip content="Verify Connection" className="self-end -mb-1">
+							<Tooltip content={$i18n.t('Verify Connection')} className="self-end -mb-1">
 								<button
 									class="self-center p-1 bg-transparent hover:bg-gray-100 dark:bg-gray-900 dark:hover:bg-gray-850 rounded-lg transition"
 									on:click={() => {

+ 4 - 4
src/lib/components/admin/Settings/Documents.svelte

@@ -387,8 +387,8 @@
 					<div class="flex items-center relative">
 						<Tooltip
 							content={BYPASS_EMBEDDING_AND_RETRIEVAL
-								? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
-								: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+								? $i18n.t('Inject the entire content as context for comprehensive processing, this is recommended for complex queries.')
+								: $i18n.t('Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.')}
 						>
 							<Switch bind:state={BYPASS_EMBEDDING_AND_RETRIEVAL} />
 						</Tooltip>
@@ -625,8 +625,8 @@
 						<div class="flex items-center relative">
 							<Tooltip
 								content={RAG_FULL_CONTEXT
-									? 'Inject entire contents as context for comprehensive processing, this is recommended for complex queries.'
-									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+									? $i18n.t('Inject the entire content as context for comprehensive processing, this is recommended for complex queries.')
+									: $i18n.t('Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.')}
 							>
 								<Switch bind:state={RAG_FULL_CONTEXT} />
 							</Tooltip>

+ 2 - 2
src/lib/components/admin/Settings/WebSearch.svelte

@@ -462,8 +462,8 @@
 						<div class="flex items-center relative">
 							<Tooltip
 								content={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
-									? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
-									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+									? $i18n.t('Inject the entire content as context for comprehensive processing, this is recommended for complex queries.')
+									: $i18n.t('Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.')}
 							>
 								<Switch bind:state={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL} />
 							</Tooltip>

+ 9 - 4
src/lib/components/channel/Messages.svelte

@@ -73,10 +73,15 @@
 						<div class="text-2xl font-medium capitalize">{channel.name}</div>
 
 						<div class=" text-gray-500">
-							This channel was created on {dayjs(channel.created_at / 1000000).format(
-								'MMMM D, YYYY'
-							)}. This is the very beginning of the {channel.name}
-							channel.
+							{
+								$i18n.t(
+									'This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.',
+									{
+										createdAt: dayjs(channel.created_at / 1000000).format('MMMM D, YYYY'),
+										channelName: channel.name,
+									}
+								)
+							}
 						</div>
 					</div>
 				{:else}

+ 1 - 1
src/lib/components/chat/Messages/CodeBlock.svelte

@@ -441,7 +441,7 @@
 
 					{#if ($config?.features?.enable_code_execution ?? true) && (lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code)))}
 						{#if executing}
-							<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
+							<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">{$i18n.t('Running')}</div>
 						{:else if run}
 							<button
 								class="flex gap-1 items-center run-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"

+ 2 - 2
src/lib/components/common/FileItemModal.svelte

@@ -87,8 +87,8 @@
 						<div>
 							<Tooltip
 								content={enableFullContent
-									? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
-									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+									? $i18n.t('Inject the entire content as context for comprehensive processing, this is recommended for complex queries.')
+									: $i18n.t('Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.')}
 							>
 								<div class="flex items-center gap-1.5 text-xs">
 									{#if enableFullContent}

+ 2 - 2
src/lib/components/workspace/common/AccessControl.svelte

@@ -113,8 +113,8 @@
 						}
 					}}
 				>
-					<option class=" text-gray-700" value="private" selected>Private</option>
-					<option class=" text-gray-700" value="public" selected>Public</option>
+					<option class=" text-gray-700" value="private" selected>{$i18n.t('Private')}</option>
+					<option class=" text-gray-700" value="public" selected>{$i18n.t('Public')}</option>
 				</select>
 
 				<div class=" text-xs text-gray-400 font-medium">

+ 6 - 0
src/lib/i18n/locales/ar-BH/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "الإفتراضي Prompt الاقتراحات",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "الإفتراضي صلاحيات المستخدم",
 	"Delete": "حذف",
 	"Delete a model": "حذف الموديل",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "قم بتضمين علامة `-api` عند تشغيل Stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "معلومات",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "إدخال الأوامر",
 	"Install from Github URL": "التثبيت من عنوان URL لجيثب",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "أخر 30 يوم",
 	"Previous 7 days": "أخر 7 أيام",
+	"Private": "",
 	"Profile Image": "صورة الملف الشخصي",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "موجه (على سبيل المثال: أخبرني بحقيقة ممتعة عن الإمبراطورية الرومانية)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "مطالبات",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com \"{{searchValue}}\" أسحب من ",
 	"Pull a model from Ollama.com": "Ollama.com سحب الموديل من ",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "الثيم",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. شكرًا لك!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "المتغير",
 	"variable to have them replaced with clipboard content.": "متغير لاستبدالها بمحتوى الحافظة.",
+	"Verify Connection": "",
 	"Version": "إصدار",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/bg-BG/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Промпт Предложения по подразбиране",
 	"Default to 389 or 636 if TLS is enabled": "По подразбиране 389 или 636, ако TLS е активиран",
 	"Default to ALL": "По подразбиране за ВСИЧКИ",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Роля на потребителя по подразбиране",
 	"Delete": "Изтриване",
 	"Delete a model": "Изтриване на модел",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Информация",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Въведете команди",
 	"Install from Github URL": "Инсталиране от URL адреса на Github",
 	"Instant Auto-Send After Voice Transcription": "Незабавно автоматично изпращане след гласова транскрипция",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "Наказание за присъствие",
 	"Previous 30 days": "Предишните 30 дни",
 	"Previous 7 days": "Предишните 7 дни",
+	"Private": "",
 	"Profile Image": "Профилна снимка",
 	"Prompt": "Промпт",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Промпт (напр. Кажи ми забавен факт за Римската империя)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Промптът е актуализиран успешно",
 	"Prompts": "Промптове",
 	"Prompts Access": "Достъп до промптове",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Извади \"{{searchValue}}\" от Ollama.com",
 	"Pull a model from Ollama.com": "Издърпайте модел от Ollama.com",
 	"Query Generation Prompt": "Промпт за генериране на запитвания",
@@ -1009,6 +1013,7 @@
 	"Theme": "Тема",
 	"Thinking...": "Мисля...",
 	"This action cannot be undone. Do you wish to continue?": "Това действие не може да бъде отменено. Желаете ли да продължите?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Това гарантира, че ценните ви разговори се запазват сигурно във вашата бекенд база данни. Благодарим ви!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Това е експериментална функция, може да не работи според очакванията и подлежи на промяна по всяко време.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Клапаните са актуализирани успешно",
 	"variable": "променлива",
 	"variable to have them replaced with clipboard content.": "променлива, за да бъдат заменени със съдържанието от клипборда.",
+	"Verify Connection": "",
 	"Version": "Версия",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Версия {{selectedVersion}} от {{totalVersions}}",
 	"View Replies": "Преглед на отговорите",

+ 6 - 0
src/lib/i18n/locales/bn-BD/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "ডিফল্ট প্রম্পট সাজেশন",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "ইউজারের ডিফল্ট পদবি",
 	"Delete": "মুছে ফেলুন",
 	"Delete a model": "একটি মডেল মুছে ফেলুন",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui চালু করার সময় `--api` ফ্ল্যাগ সংযুক্ত করুন",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "তথ্য",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "ইনপুট কমান্ডস",
 	"Install from Github URL": "Github URL থেকে ইনস্টল করুন",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "পূর্ব ৩০ দিন",
 	"Previous 7 days": "পূর্ব ৭ দিন",
+	"Private": "",
 	"Profile Image": "প্রোফাইল ইমেজ",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "প্রম্প্ট (উদাহরণস্বরূপ, আমি রোমান ইমপার্টের সম্পর্কে একটি উপস্থিতি জানতে বল)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "প্রম্পটসমূহ",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com থেকে \"{{searchValue}}\" টানুন",
 	"Pull a model from Ollama.com": "Ollama.com থেকে একটি টেনে আনুন আনুন",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "থিম",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "এটা নিশ্চিত করে যে, আপনার গুরুত্বপূর্ণ আলোচনা নিরাপদে আপনার ব্যাকএন্ড ডেটাবেজে সংরক্ষিত আছে। ধন্যবাদ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "ভেরিয়েবল",
 	"variable to have them replaced with clipboard content.": "ক্লিপবোর্ডের কন্টেন্ট দিয়ে যেই ভেরিয়েবল রিপ্লেস করা যাবে।",
+	"Verify Connection": "",
 	"Version": "ভার্সন",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/ca-ES/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Suggeriments d'indicació per defecte",
 	"Default to 389 or 636 if TLS is enabled": "Per defecte 389 o 636 si TLS està habilitat",
 	"Default to ALL": "Per defecte TOTS",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Rol d'usuari per defecte",
 	"Delete": "Eliminar",
 	"Delete a model": "Eliminar un model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible.",
 	"Info": "Informació",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Entra comandes",
 	"Install from Github URL": "Instal·lar des de l'URL de Github",
 	"Instant Auto-Send After Voice Transcription": "Enviament automàtic després de la transcripció de veu",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "Penalització de presència",
 	"Previous 30 days": "30 dies anteriors",
 	"Previous 7 days": "7 dies anteriors",
+	"Private": "",
 	"Profile Image": "Imatge de perfil",
 	"Prompt": "Indicació",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Indicació (p.ex. Digues-me quelcom divertit sobre l'Imperi Romà)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Indicació actualitzada correctament",
 	"Prompts": "Indicacions",
 	"Prompts Access": "Accés a les indicacions",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Obtenir \"{{searchValue}}\" de Ollama.com",
 	"Pull a model from Ollama.com": "Obtenir un model d'Ollama.com",
 	"Query Generation Prompt": "Indicació per a generació de consulta",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Pensant...",
 	"This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes.",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Valves actualitat correctament",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable per tenir-les reemplaçades amb el contingut del porta-retalls.",
+	"Verify Connection": "",
 	"Version": "Versió",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versió {{selectedVersion}} de {{totalVersions}}",
 	"View Replies": "Veure les respostes",

+ 6 - 0
src/lib/i18n/locales/ceb-PH/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Default nga prompt nga mga sugyot",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Default nga Papel sa Gumagamit",
 	"Delete": "",
 	"Delete a model": "Pagtangtang sa usa ka template",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Iapil ang `--api` nga bandila kung nagdagan nga stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Pagsulod sa input commands",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Mga aghat",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "Pagkuha ug template gikan sa Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Kini nagsiguro nga ang imong bililhon nga mga panag-istoryahanay luwas nga natipig sa imong backend database. ",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable aron pulihan kini sa mga sulud sa clipboard.",
+	"Verify Connection": "",
 	"Version": "Bersyon",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/cs-CZ/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Výchozí návrhy promptů",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Výchozí uživatelská role",
 	"Delete": "Smazat",
 	"Delete a model": "Odstranit model.",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Při spuštění stable-diffusion-webui zahrňte příznak `--api`.",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Vstupní příkazy",
 	"Install from Github URL": "Instalace z URL adresy Githubu",
 	"Instant Auto-Send After Voice Transcription": "Okamžité automatické odeslání po přepisu hlasu",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Předchozích 30 dnů",
 	"Previous 7 days": "Předchozích 7 dní",
+	"Private": "",
 	"Profile Image": "Profilový obrázek",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (např. Řekni mi zábavný fakt o Římské říši)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompty",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Stáhněte \"{{searchValue}}\" z Ollama.com",
 	"Pull a model from Ollama.com": "Stáhněte model z Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Téma",
 	"Thinking...": "Přemýšlím...",
 	"This action cannot be undone. Do you wish to continue?": "Tuto akci nelze vrátit zpět. Přejete si pokračovat?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To zajišťuje, že vaše cenné konverzace jsou bezpečně uloženy ve vaší backendové databázi. Děkujeme!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Jedná se o experimentální funkci, nemusí fungovat podle očekávání a může být kdykoliv změněna.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Ventily byly úspěšně aktualizovány.",
 	"variable": "proměnná",
 	"variable to have them replaced with clipboard content.": "proměnnou, aby byl jejich obsah nahrazen obsahem schránky.",
+	"Verify Connection": "",
 	"Version": "Verze",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Verze {{selectedVersion}} z {{totalVersions}}",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/da-DK/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Standardforslag til prompt",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Brugers rolle som standard",
 	"Delete": "Slet",
 	"Delete a model": "Slet en model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inkluder `--api` flag, når du kører stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Inputkommandoer",
 	"Install from Github URL": "Installer fra Github URL",
 	"Instant Auto-Send After Voice Transcription": "Øjeblikkelig automatisk afsendelse efter stemmetransskription",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Seneste 30 dage",
 	"Previous 7 days": "Seneste 7 dage",
+	"Private": "",
 	"Profile Image": "Profilbillede",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (f.eks. Fortæl mig en sjov kendsgerning om Romerriget)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompts",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Hent \"{{searchValue}}\" fra Ollama.com",
 	"Pull a model from Ollama.com": "Hent en model fra Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Tænker...",
 	"This action cannot be undone. Do you wish to continue?": "Denne handling kan ikke fortrydes. Vil du fortsætte?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer, at dine værdifulde samtaler gemmes sikkert i din backend-database. Tak!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentel funktion, den fungerer muligvis ikke som forventet og kan ændres når som helst.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Ventiler opdateret.",
 	"variable": "variabel",
 	"variable to have them replaced with clipboard content.": "variabel for at få dem erstattet med indholdet af udklipsholderen.",
+	"Verify Connection": "",
 	"Version": "Version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} af {{totalVersions}}",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/de-DE/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Prompt-Vorschläge",
 	"Default to 389 or 636 if TLS is enabled": "Standardmäßig auf 389 oder 636 setzen, wenn TLS aktiviert ist",
 	"Default to ALL": "Standardmäßig auf ALLE setzen",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Standardbenutzerrolle",
 	"Delete": "Löschen",
 	"Delete a model": "Ein Modell löschen",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api` hinzu",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Eingabebefehle",
 	"Install from Github URL": "Installiere von der Github-URL",
 	"Instant Auto-Send After Voice Transcription": "Spracherkennung direkt absenden",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Vorherige 30 Tage",
 	"Previous 7 days": "Vorherige 7 Tage",
+	"Private": "",
 	"Profile Image": "Profilbild",
 	"Prompt": "Prompt",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (z. B. \"Erzähle mir eine interessante Tatsache über das Römische Reich\")",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Prompt erfolgreich aktualisiert",
 	"Prompts": "Prompts",
 	"Prompts Access": "Prompt-Zugriff",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" von Ollama.com beziehen",
 	"Pull a model from Ollama.com": "Modell von Ollama.com beziehen",
 	"Query Generation Prompt": "Abfragegenerierungsprompt",
@@ -1009,6 +1013,7 @@
 	"Theme": "Design",
 	"Thinking...": "Denke nach...",
 	"This action cannot be undone. Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Unterhaltungen sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Valves erfolgreich aktualisiert",
 	"variable": "Variable",
 	"variable to have them replaced with clipboard content.": "Variable, um den Inhalt der Zwischenablage beim Nutzen des Prompts zu ersetzen.",
+	"Verify Connection": "",
 	"Version": "Version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} von {{totalVersions}}",
 	"View Replies": "Antworten anzeigen",

+ 6 - 0
src/lib/i18n/locales/dg-DG/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Default Prompt Suggestions",
 	"Default Prompt Suggestions": "Default Prompt Suggestions",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Default User Role",
 	"Default User Role": "Default User Role",
 	"Delete": "",
 	"Delete": "",
 	"Delete a model": "Delete a model",
 	"Delete a model": "Delete a model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Input commands",
 	"Input commands": "Input commands",
 	"Install from Github URL": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Promptos",
 	"Prompts": "Promptos",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "Pull a wowdel from Ollama.com",
 	"Pull a model from Ollama.com": "Pull a wowdel from Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Theme much theme",
 	"Theme": "Theme much theme",
 	"Thinking...": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "variable very variable",
 	"variable": "variable very variable",
 	"variable to have them replaced with clipboard content.": "variable to have them replaced with clipboard content. Very replace.",
 	"variable to have them replaced with clipboard content.": "variable to have them replaced with clipboard content. Very replace.",
+	"Verify Connection": "",
 	"Version": "Version much version",
 	"Version": "Version much version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/el-GR/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Προεπιλεγμένες Προτάσεις Προτροπής",
 	"Default Prompt Suggestions": "Προεπιλεγμένες Προτάσεις Προτροπής",
 	"Default to 389 or 636 if TLS is enabled": "Προεπιλογή στο 389 ή 636 εάν είναι ενεργοποιημένο το TLS",
 	"Default to 389 or 636 if TLS is enabled": "Προεπιλογή στο 389 ή 636 εάν είναι ενεργοποιημένο το TLS",
 	"Default to ALL": "Προεπιλογή σε ΟΛΑ",
 	"Default to ALL": "Προεπιλογή σε ΟΛΑ",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Προεπιλεγμένος Ρόλος Χρήστη",
 	"Default User Role": "Προεπιλεγμένος Ρόλος Χρήστη",
 	"Delete": "Διαγραφή",
 	"Delete": "Διαγραφή",
 	"Delete a model": "Διαγραφή ενός μοντέλου",
 	"Delete a model": "Διαγραφή ενός μοντέλου",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api` όταν τρέχετε το stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api` όταν τρέχετε το stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Πληροφορίες",
 	"Info": "Πληροφορίες",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Εισαγωγή εντολών",
 	"Input commands": "Εισαγωγή εντολών",
 	"Install from Github URL": "Εγκατάσταση από URL Github",
 	"Install from Github URL": "Εγκατάσταση από URL Github",
 	"Instant Auto-Send After Voice Transcription": "Άμεση Αυτόματη Αποστολή μετά τη μεταγραφή φωνής",
 	"Instant Auto-Send After Voice Transcription": "Άμεση Αυτόματη Αποστολή μετά τη μεταγραφή φωνής",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Προηγούμενες 30 ημέρες",
 	"Previous 30 days": "Προηγούμενες 30 ημέρες",
 	"Previous 7 days": "Προηγούμενες 7 ημέρες",
 	"Previous 7 days": "Προηγούμενες 7 ημέρες",
+	"Private": "",
 	"Profile Image": "Εικόνα Προφίλ",
 	"Profile Image": "Εικόνα Προφίλ",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Προτροπή (π.χ. Πες μου ένα διασκεδαστικό γεγονός για την Ρωμαϊκή Αυτοκρατορία)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Προτροπή (π.χ. Πες μου ένα διασκεδαστικό γεγονός για την Ρωμαϊκή Αυτοκρατορία)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Η προτροπή ενημερώθηκε με επιτυχία",
 	"Prompt updated successfully": "Η προτροπή ενημερώθηκε με επιτυχία",
 	"Prompts": "Προτροπές",
 	"Prompts": "Προτροπές",
 	"Prompts Access": "Πρόσβαση Προτροπών",
 	"Prompts Access": "Πρόσβαση Προτροπών",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Τραβήξτε \"{{searchValue}}\" από το Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Τραβήξτε \"{{searchValue}}\" από το Ollama.com",
 	"Pull a model from Ollama.com": "Τραβήξτε ένα μοντέλο από το Ollama.com",
 	"Pull a model from Ollama.com": "Τραβήξτε ένα μοντέλο από το Ollama.com",
 	"Query Generation Prompt": "Προτροπή Δημιουργίας Ερωτήσεων",
 	"Query Generation Prompt": "Προτροπή Δημιουργίας Ερωτήσεων",
@@ -1009,6 +1013,7 @@
 	"Theme": "Θέμα",
 	"Theme": "Θέμα",
 	"Thinking...": "Σκέφτομαι...",
 	"Thinking...": "Σκέφτομαι...",
 	"This action cannot be undone. Do you wish to continue?": "Αυτή η ενέργεια δεν μπορεί να αναιρεθεί. Θέλετε να συνεχίσετε;",
 	"This action cannot be undone. Do you wish to continue?": "Αυτή η ενέργεια δεν μπορεί να αναιρεθεί. Θέλετε να συνεχίσετε;",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Οι βαλβίδες ενημερώθηκαν με επιτυχία",
 	"Valves updated successfully": "Οι βαλβίδες ενημερώθηκαν με επιτυχία",
 	"variable": "μεταβλητή",
 	"variable": "μεταβλητή",
 	"variable to have them replaced with clipboard content.": "μεταβλητή να αντικατασταθούν με το περιεχόμενο του πρόχειρου.",
 	"variable to have them replaced with clipboard content.": "μεταβλητή να αντικατασταθούν με το περιεχόμενο του πρόχειρου.",
+	"Verify Connection": "",
 	"Version": "Έκδοση",
 	"Version": "Έκδοση",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Έκδοση {{selectedVersion}} από {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Έκδοση {{selectedVersion}} από {{totalVersions}}",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/en-GB/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "",
 	"Default Prompt Suggestions": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "",
 	"Default User Role": "",
 	"Delete": "",
 	"Delete": "",
 	"Delete a model": "",
 	"Delete a model": "",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "",
 	"Input commands": "",
 	"Install from Github URL": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "",
 	"Prompts": "",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "",
 	"Theme": "",
 	"Thinking...": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "",
 	"variable": "",
 	"variable to have them replaced with clipboard content.": "",
 	"variable to have them replaced with clipboard content.": "",
+	"Verify Connection": "",
 	"Version": "",
 	"Version": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/en-US/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "",
 	"Default Prompt Suggestions": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "",
 	"Default User Role": "",
 	"Delete": "",
 	"Delete": "",
 	"Delete a model": "",
 	"Delete a model": "",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "",
 	"Input commands": "",
 	"Install from Github URL": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "",
 	"Prompts": "",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "",
 	"Theme": "",
 	"Thinking...": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "",
 	"variable": "",
 	"variable to have them replaced with clipboard content.": "",
 	"variable to have them replaced with clipboard content.": "",
+	"Verify Connection": "",
 	"Version": "",
 	"Version": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/es-ES/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Sugerencias de mensajes por defecto",
 	"Default Prompt Suggestions": "Sugerencias de mensajes por defecto",
 	"Default to 389 or 636 if TLS is enabled": "Predeterminado a 389 o 636 si TLS está habilitado",
 	"Default to 389 or 636 if TLS is enabled": "Predeterminado a 389 o 636 si TLS está habilitado",
 	"Default to ALL": "Predeterminado a TODOS",
 	"Default to ALL": "Predeterminado a TODOS",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Rol por defecto para usuarios",
 	"Default User Role": "Rol por defecto para usuarios",
 	"Delete": "Borrar",
 	"Delete": "Borrar",
 	"Delete a model": "Borra un modelo",
 	"Delete a model": "Borra un modelo",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Incluir el indicador `--api` al ejecutar stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Incluir el indicador `--api` al ejecutar stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Información",
 	"Info": "Información",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Ingresar comandos",
 	"Input commands": "Ingresar comandos",
 	"Install from Github URL": "Instalar desde la URL de Github",
 	"Install from Github URL": "Instalar desde la URL de Github",
 	"Instant Auto-Send After Voice Transcription": "Auto-Enviar Después de la Transcripción de Voz",
 	"Instant Auto-Send After Voice Transcription": "Auto-Enviar Después de la Transcripción de Voz",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Últimos 30 días",
 	"Previous 30 days": "Últimos 30 días",
 	"Previous 7 days": "Últimos 7 días",
 	"Previous 7 days": "Últimos 7 días",
+	"Private": "",
 	"Profile Image": "Imagen de perfil",
 	"Profile Image": "Imagen de perfil",
 	"Prompt": "Prompt",
 	"Prompt": "Prompt",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (por ejemplo, cuéntame una cosa divertida sobre el Imperio Romano)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (por ejemplo, cuéntame una cosa divertida sobre el Imperio Romano)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Prompt actualizado exitosamente",
 	"Prompt updated successfully": "Prompt actualizado exitosamente",
 	"Prompts": "Prompts",
 	"Prompts": "Prompts",
 	"Prompts Access": "Acceso a Prompts",
 	"Prompts Access": "Acceso a Prompts",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Extraer \"{{searchValue}}\" de Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Extraer \"{{searchValue}}\" de Ollama.com",
 	"Pull a model from Ollama.com": "Obtener un modelo de Ollama.com",
 	"Pull a model from Ollama.com": "Obtener un modelo de Ollama.com",
 	"Query Generation Prompt": "Prompt de generación de consulta",
 	"Query Generation Prompt": "Prompt de generación de consulta",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Theme": "Tema",
 	"Thinking...": "Pensando...",
 	"Thinking...": "Pensando...",
 	"This action cannot be undone. Do you wish to continue?": "Esta acción no se puede deshacer. ¿Desea continuar?",
 	"This action cannot be undone. Do you wish to continue?": "Esta acción no se puede deshacer. ¿Desea continuar?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Esto garantiza que sus valiosas conversaciones se guarden de forma segura en su base de datos en el backend. ¡Gracias!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Esto garantiza que sus valiosas conversaciones se guarden de forma segura en su base de datos en el backend. ¡Gracias!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta es una característica experimental que puede no funcionar como se esperaba y está sujeto a cambios en cualquier momento.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta es una característica experimental que puede no funcionar como se esperaba y está sujeto a cambios en cualquier momento.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Valves actualizados con éxito",
 	"Valves updated successfully": "Valves actualizados con éxito",
 	"variable": "variable",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable para reemplazarlos con el contenido del portapapeles.",
 	"variable to have them replaced with clipboard content.": "variable para reemplazarlos con el contenido del portapapeles.",
+	"Verify Connection": "",
 	"Version": "Versión",
 	"Version": "Versión",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versión {{selectedVersion}} de {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versión {{selectedVersion}} de {{totalVersions}}",
 	"View Replies": "Ver respuestas",
 	"View Replies": "Ver respuestas",

+ 6 - 0
src/lib/i18n/locales/eu-ES/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Prompt Iradokizun Lehenetsiak",
 	"Default Prompt Suggestions": "Prompt Iradokizun Lehenetsiak",
 	"Default to 389 or 636 if TLS is enabled": "Lehenetsi 389 edo 636 TLS gaituta badago",
 	"Default to 389 or 636 if TLS is enabled": "Lehenetsi 389 edo 636 TLS gaituta badago",
 	"Default to ALL": "Lehenetsi GUZTIAK",
 	"Default to ALL": "Lehenetsi GUZTIAK",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Erabiltzaile Rol Lehenetsia",
 	"Default User Role": "Erabiltzaile Rol Lehenetsia",
 	"Delete": "Ezabatu",
 	"Delete": "Ezabatu",
 	"Delete a model": "Ezabatu eredu bat",
 	"Delete a model": "Ezabatu eredu bat",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Sartu `--api` bandera stable-diffusion-webui exekutatzean",
 	"Include `--api` flag when running stable-diffusion-webui": "Sartu `--api` bandera stable-diffusion-webui exekutatzean",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informazioa",
 	"Info": "Informazioa",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Sartu komandoak",
 	"Input commands": "Sartu komandoak",
 	"Install from Github URL": "Instalatu Github URLtik",
 	"Install from Github URL": "Instalatu Github URLtik",
 	"Instant Auto-Send After Voice Transcription": "Bidalketa Automatiko Berehalakoa Ahots Transkripzioaren Ondoren",
 	"Instant Auto-Send After Voice Transcription": "Bidalketa Automatiko Berehalakoa Ahots Transkripzioaren Ondoren",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Aurreko 30 egunak",
 	"Previous 30 days": "Aurreko 30 egunak",
 	"Previous 7 days": "Aurreko 7 egunak",
 	"Previous 7 days": "Aurreko 7 egunak",
+	"Private": "",
 	"Profile Image": "Profil irudia",
 	"Profile Image": "Profil irudia",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt-a (adib. Kontatu datu dibertigarri bat Erromatar Inperioari buruz)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt-a (adib. Kontatu datu dibertigarri bat Erromatar Inperioari buruz)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Prompt-a ongi eguneratu da",
 	"Prompt updated successfully": "Prompt-a ongi eguneratu da",
 	"Prompts": "Prompt-ak",
 	"Prompts": "Prompt-ak",
 	"Prompts Access": "Prompt sarbidea",
 	"Prompts Access": "Prompt sarbidea",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ekarri \"{{searchValue}}\" Ollama.com-etik",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ekarri \"{{searchValue}}\" Ollama.com-etik",
 	"Pull a model from Ollama.com": "Ekarri modelo bat Ollama.com-etik",
 	"Pull a model from Ollama.com": "Ekarri modelo bat Ollama.com-etik",
 	"Query Generation Prompt": "Kontsulta sortzeko prompt-a",
 	"Query Generation Prompt": "Kontsulta sortzeko prompt-a",
@@ -1009,6 +1013,7 @@
 	"Theme": "Gaia",
 	"Theme": "Gaia",
 	"Thinking...": "Pentsatzen...",
 	"Thinking...": "Pentsatzen...",
 	"This action cannot be undone. Do you wish to continue?": "Ekintza hau ezin da desegin. Jarraitu nahi duzu?",
 	"This action cannot be undone. Do you wish to continue?": "Ekintza hau ezin da desegin. Jarraitu nahi duzu?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Honek zure elkarrizketa baliotsuak modu seguruan zure backend datu-basean gordeko direla ziurtatzen du. Eskerrik asko!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Honek zure elkarrizketa baliotsuak modu seguruan zure backend datu-basean gordeko direla ziurtatzen du. Eskerrik asko!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Hau funtzionalitate esperimental bat da, baliteke espero bezala ez funtzionatzea eta edozein unetan aldaketak izatea.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Hau funtzionalitate esperimental bat da, baliteke espero bezala ez funtzionatzea eta edozein unetan aldaketak izatea.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Balbulak ongi eguneratu dira",
 	"Valves updated successfully": "Balbulak ongi eguneratu dira",
 	"variable": "aldagaia",
 	"variable": "aldagaia",
 	"variable to have them replaced with clipboard content.": "aldagaia arbeleko edukiarekin ordezkatzeko.",
 	"variable to have them replaced with clipboard content.": "aldagaia arbeleko edukiarekin ordezkatzeko.",
+	"Verify Connection": "",
 	"Version": "Bertsioa",
 	"Version": "Bertsioa",
 	"Version {{selectedVersion}} of {{totalVersions}}": "{{totalVersions}}-tik {{selectedVersion}}. bertsioa",
 	"Version {{selectedVersion}} of {{totalVersions}}": "{{totalVersions}}-tik {{selectedVersion}}. bertsioa",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/fa-IR/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "پیشنهادات پرامپت پیش فرض",
 	"Default Prompt Suggestions": "پیشنهادات پرامپت پیش فرض",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "نقش کاربر پیش فرض",
 	"Default User Role": "نقش کاربر پیش فرض",
 	"Delete": "حذف",
 	"Delete": "حذف",
 	"Delete a model": "حذف یک مدل",
 	"Delete a model": "حذف یک مدل",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "فلگ `--api` را هنکام اجرای stable-diffusion-webui استفاده کنید.",
 	"Include `--api` flag when running stable-diffusion-webui": "فلگ `--api` را هنکام اجرای stable-diffusion-webui استفاده کنید.",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "اطلاعات",
 	"Info": "اطلاعات",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "ورودی دستورات",
 	"Input commands": "ورودی دستورات",
 	"Install from Github URL": "نصب از ادرس Github",
 	"Install from Github URL": "نصب از ادرس Github",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "30 روز قبل",
 	"Previous 30 days": "30 روز قبل",
 	"Previous 7 days": "7 روز قبل",
 	"Previous 7 days": "7 روز قبل",
+	"Private": "",
 	"Profile Image": "تصویر پروفایل",
 	"Profile Image": "تصویر پروفایل",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "پیشنهاد (برای مثال: به من بگوید چیزی که برای من یک کاربرد داره درباره ایران)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "پیشنهاد (برای مثال: به من بگوید چیزی که برای من یک کاربرد داره درباره ایران)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "پرامپت\u200cها",
 	"Prompts": "پرامپت\u200cها",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "بازگرداندن \"{{searchValue}}\" از Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "بازگرداندن \"{{searchValue}}\" از Ollama.com",
 	"Pull a model from Ollama.com": "دریافت یک مدل از Ollama.com",
 	"Pull a model from Ollama.com": "دریافت یک مدل از Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "پوسته",
 	"Theme": "پوسته",
 	"Thinking...": "در حال فکر...",
 	"Thinking...": "در حال فکر...",
 	"This action cannot be undone. Do you wish to continue?": "این اقدام قابل بازگردانی نیست. برای ادامه اطمینان دارید؟",
 	"This action cannot be undone. Do you wish to continue?": "این اقدام قابل بازگردانی نیست. برای ادامه اطمینان دارید؟",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "این تضمین می کند که مکالمات ارزشمند شما به طور ایمن در پایگاه داده بکند ذخیره می شود. تشکر!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "این تضمین می کند که مکالمات ارزشمند شما به طور ایمن در پایگاه داده بکند ذخیره می شود. تشکر!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "متغیر",
 	"variable": "متغیر",
 	"variable to have them replaced with clipboard content.": "متغیر برای جایگزینی آنها با محتوای بریده\u200cدان.",
 	"variable to have them replaced with clipboard content.": "متغیر برای جایگزینی آنها با محتوای بریده\u200cدان.",
+	"Verify Connection": "",
 	"Version": "نسخه",
 	"Version": "نسخه",
 	"Version {{selectedVersion}} of {{totalVersions}}": "نسخهٔ {{selectedVersion}} از {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "نسخهٔ {{selectedVersion}} از {{totalVersions}}",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/fi-FI/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Oletuskehotteiden ehdotukset",
 	"Default Prompt Suggestions": "Oletuskehotteiden ehdotukset",
 	"Default to 389 or 636 if TLS is enabled": "Oletus 389 tai 636, jos TLS on käytössä",
 	"Default to 389 or 636 if TLS is enabled": "Oletus 389 tai 636, jos TLS on käytössä",
 	"Default to ALL": "Oletus KAIKKI",
 	"Default to ALL": "Oletus KAIKKI",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Oletuskäyttäjärooli",
 	"Default User Role": "Oletuskäyttäjärooli",
 	"Delete": "Poista",
 	"Delete": "Poista",
 	"Delete a model": "Poista malli",
 	"Delete a model": "Poista malli",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Sisällytä `--api`-lippu ajettaessa stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Sisällytä `--api`-lippu ajettaessa stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Tiedot",
 	"Info": "Tiedot",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Syötekäskyt",
 	"Input commands": "Syötekäskyt",
 	"Install from Github URL": "Asenna Github-URL:stä",
 	"Install from Github URL": "Asenna Github-URL:stä",
 	"Instant Auto-Send After Voice Transcription": "Heti automaattinen lähetys äänitunnistuksen jälkeen",
 	"Instant Auto-Send After Voice Transcription": "Heti automaattinen lähetys äänitunnistuksen jälkeen",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Edelliset 30 päivää",
 	"Previous 30 days": "Edelliset 30 päivää",
 	"Previous 7 days": "Edelliset 7 päivää",
 	"Previous 7 days": "Edelliset 7 päivää",
+	"Private": "",
 	"Profile Image": "Profiilikuva",
 	"Profile Image": "Profiilikuva",
 	"Prompt": "Kehote",
 	"Prompt": "Kehote",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Kehote (esim. Kerro hauska fakta Rooman valtakunnasta)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Kehote (esim. Kerro hauska fakta Rooman valtakunnasta)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Kehote päivitetty onnistuneesti",
 	"Prompt updated successfully": "Kehote päivitetty onnistuneesti",
 	"Prompts": "Kehotteet",
 	"Prompts": "Kehotteet",
 	"Prompts Access": "Kehoitteiden käyttöoikeudet",
 	"Prompts Access": "Kehoitteiden käyttöoikeudet",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Lataa \"{{searchValue}}\" Ollama.comista",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Lataa \"{{searchValue}}\" Ollama.comista",
 	"Pull a model from Ollama.com": "Lataa malli Ollama.comista",
 	"Pull a model from Ollama.com": "Lataa malli Ollama.comista",
 	"Query Generation Prompt": "Kyselytulosten luontikehote",
 	"Query Generation Prompt": "Kyselytulosten luontikehote",
@@ -1009,6 +1013,7 @@
 	"Theme": "Teema",
 	"Theme": "Teema",
 	"Thinking...": "Ajattelee...",
 	"Thinking...": "Ajattelee...",
 	"This action cannot be undone. Do you wish to continue?": "Tätä toimintoa ei voi peruuttaa. Haluatko jatkaa?",
 	"This action cannot be undone. Do you wish to continue?": "Tätä toimintoa ei voi peruuttaa. Haluatko jatkaa?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tämä varmistaa, että arvokkaat keskustelusi tallennetaan turvallisesti backend-tietokantaasi. Kiitos!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tämä varmistaa, että arvokkaat keskustelusi tallennetaan turvallisesti backend-tietokantaasi. Kiitos!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tämä on kokeellinen ominaisuus, se ei välttämättä toimi odotetulla tavalla ja se voi muuttua milloin tahansa.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tämä on kokeellinen ominaisuus, se ei välttämättä toimi odotetulla tavalla ja se voi muuttua milloin tahansa.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Venttiilit päivitetty onnistuneesti",
 	"Valves updated successfully": "Venttiilit päivitetty onnistuneesti",
 	"variable": "muuttuja",
 	"variable": "muuttuja",
 	"variable to have them replaced with clipboard content.": "muuttuja korvataan leikepöydän sisällöllä.",
 	"variable to have them replaced with clipboard content.": "muuttuja korvataan leikepöydän sisällöllä.",
+	"Verify Connection": "",
 	"Version": "Versio",
 	"Version": "Versio",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versio {{selectedVersion}} / {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versio {{selectedVersion}} / {{totalVersions}}",
 	"View Replies": "Näytä vastaukset",
 	"View Replies": "Näytä vastaukset",

+ 6 - 0
src/lib/i18n/locales/fr-CA/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Suggestions de prompts par défaut",
 	"Default Prompt Suggestions": "Suggestions de prompts par défaut",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Rôle utilisateur par défaut",
 	"Default User Role": "Rôle utilisateur par défaut",
 	"Delete": "Supprimer",
 	"Delete": "Supprimer",
 	"Delete a model": "Supprimer un modèle",
 	"Delete a model": "Supprimer un modèle",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Entrez les commandes",
 	"Input commands": "Entrez les commandes",
 	"Install from Github URL": "Installer depuis l'URL GitHub",
 	"Install from Github URL": "Installer depuis l'URL GitHub",
 	"Instant Auto-Send After Voice Transcription": "Envoi automatique instantané après transcription vocale",
 	"Instant Auto-Send After Voice Transcription": "Envoi automatique instantané après transcription vocale",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "30 derniers jours",
 	"Previous 30 days": "30 derniers jours",
 	"Previous 7 days": "7 derniers jours",
 	"Previous 7 days": "7 derniers jours",
+	"Private": "",
 	"Profile Image": "Image de profil",
 	"Profile Image": "Image de profil",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (par ex. Dites-moi un fait amusant à propos de l'Empire romain)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (par ex. Dites-moi un fait amusant à propos de l'Empire romain)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Prompts",
 	"Prompts": "Prompts",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
 	"Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
 	"Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Thème",
 	"Theme": "Thème",
 	"Thinking...": "En train de réfléchir...",
 	"Thinking...": "En train de réfléchir...",
 	"This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
 	"This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Les vannes ont été mises à jour avec succès",
 	"Valves updated successfully": "Les vannes ont été mises à jour avec succès",
 	"variable": "variable",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.",
 	"variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.",
+	"Verify Connection": "",
 	"Version": "Version améliorée",
 	"Version": "Version améliorée",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/fr-FR/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Suggestions de prompts par défaut",
 	"Default Prompt Suggestions": "Suggestions de prompts par défaut",
 	"Default to 389 or 636 if TLS is enabled": "Par défaut à 389 ou 636 si TLS est activé",
 	"Default to 389 or 636 if TLS is enabled": "Par défaut à 389 ou 636 si TLS est activé",
 	"Default to ALL": "Par défaut à TOUS",
 	"Default to ALL": "Par défaut à TOUS",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Rôle utilisateur par défaut",
 	"Default User Role": "Rôle utilisateur par défaut",
 	"Delete": "Supprimer",
 	"Delete": "Supprimer",
 	"Delete a model": "Supprimer un modèle",
 	"Delete a model": "Supprimer un modèle",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Commandes d'entrée",
 	"Input commands": "Commandes d'entrée",
 	"Install from Github URL": "Installer depuis une URL GitHub",
 	"Install from Github URL": "Installer depuis une URL GitHub",
 	"Instant Auto-Send After Voice Transcription": "Envoi automatique après la transcription",
 	"Instant Auto-Send After Voice Transcription": "Envoi automatique après la transcription",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "Pénalité de présence",
 	"Presence Penalty": "Pénalité de présence",
 	"Previous 30 days": "30 derniers jours",
 	"Previous 30 days": "30 derniers jours",
 	"Previous 7 days": "7 derniers jours",
 	"Previous 7 days": "7 derniers jours",
+	"Private": "",
 	"Profile Image": "Image de profil",
 	"Profile Image": "Image de profil",
 	"Prompt": "Prompt",
 	"Prompt": "Prompt",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (par ex. Dites-moi un fait amusant à propos de l'Empire romain)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (par ex. Dites-moi un fait amusant à propos de l'Empire romain)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Prompt mis à jour avec succès",
 	"Prompt updated successfully": "Prompt mis à jour avec succès",
 	"Prompts": "Prompts",
 	"Prompts": "Prompts",
 	"Prompts Access": "Accès aux prompts",
 	"Prompts Access": "Accès aux prompts",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
 	"Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
 	"Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
 	"Query Generation Prompt": "Prompt de génération de requête",
 	"Query Generation Prompt": "Prompt de génération de requête",
@@ -1009,6 +1013,7 @@
 	"Theme": "Thème",
 	"Theme": "Thème",
 	"Thinking...": "En train de réfléchir...",
 	"Thinking...": "En train de réfléchir...",
 	"This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
 	"This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Les vannes ont été mises à jour avec succès",
 	"Valves updated successfully": "Les vannes ont été mises à jour avec succès",
 	"variable": "variable",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.",
 	"variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.",
+	"Verify Connection": "",
 	"Version": "Version:",
 	"Version": "Version:",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} de {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} de {{totalVersions}}",
 	"View Replies": "Voir les réponses",
 	"View Replies": "Voir les réponses",

+ 6 - 0
src/lib/i18n/locales/he-IL/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "הצעות ברירת מחדל לפקודות",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "תפקיד משתמש ברירת מחדל",
 	"Delete": "מחק",
 	"Delete a model": "מחק מודל",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "כלול את הדגל `--api` בעת הרצת stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "מידע",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "פקודות קלט",
 	"Install from Github URL": "התקן מכתובת URL של Github",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "30 הימים הקודמים",
 	"Previous 7 days": "7 הימים הקודמים",
+	"Private": "",
 	"Profile Image": "תמונת פרופיל",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "פקודה (למשל, ספר לי עובדה מעניינת על האימפריה הרומית)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "פקודות",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "משוך \"{{searchValue}}\" מ-Ollama.com",
 	"Pull a model from Ollama.com": "משוך מודל מ-Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "נושא",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "פעולה זו מבטיחה שהשיחות בעלות הערך שלך יישמרו באופן מאובטח במסד הנתונים העורפי שלך. תודה!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "משתנה",
 	"variable to have them replaced with clipboard content.": "משתנה להחליפו ב- clipboard תוכן.",
+	"Verify Connection": "",
 	"Version": "גרסה",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/hi-IN/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "डिफ़ॉल्ट प्रॉम्प्ट सुझाव",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "डिफ़ॉल्ट उपयोगकर्ता भूमिका",
 	"Delete": "डिलीट",
 	"Delete a model": "एक मॉडल हटाएँ",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui चलाते समय `--api` ध्वज शामिल करें",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "सूचना-विषयक",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "इनपुट क命",
 	"Install from Github URL": "Github URL से इंस्टॉल करें",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "पिछले 30 दिन",
 	"Previous 7 days": "पिछले 7 दिन",
+	"Private": "",
 	"Profile Image": "प्रोफ़ाइल छवि",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "प्रॉम्प्ट (उदाहरण के लिए मुझे रोमन साम्राज्य के बारे में एक मजेदार तथ्य बताएं)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "प्रॉम्प्ट",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" को Ollama.com से खींचें",
 	"Pull a model from Ollama.com": "Ollama.com से एक मॉडल खींचें",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "थीम",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "यह सुनिश्चित करता है कि आपकी मूल्यवान बातचीत आपके बैकएंड डेटाबेस में सुरक्षित रूप से सहेजी गई है। धन्यवाद!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "वेरिएबल",
 	"variable to have them replaced with clipboard content.": "उन्हें क्लिपबोर्ड सामग्री से बदलने के लिए वेरिएबल।",
+	"Verify Connection": "",
 	"Version": "संस्करण",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/hr-HR/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Zadani prijedlozi prompta",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Zadana korisnička uloga",
 	"Delete": "Izbriši",
 	"Delete a model": "Izbriši model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Uključite zastavicu `--api` prilikom pokretanja stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informacije",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Unos naredbi",
 	"Install from Github URL": "Instaliraj s Github URL-a",
 	"Instant Auto-Send After Voice Transcription": "Trenutačno automatsko slanje nakon glasovne transkripcije",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Prethodnih 30 dana",
 	"Previous 7 days": "Prethodnih 7 dana",
+	"Private": "",
 	"Profile Image": "Profilna slika",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (npr. Reci mi zanimljivost o Rimskom carstvu)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompti",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Povucite \"{{searchValue}}\" s Ollama.com",
 	"Pull a model from Ollama.com": "Povucite model s Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Razmišljam",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ovo osigurava da su vaši vrijedni razgovori sigurno spremljeni u bazu podataka. Hvala vam!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ovo je eksperimentalna značajka, možda neće funkcionirati prema očekivanjima i podložna je promjenama u bilo kojem trenutku.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "varijabla",
 	"variable to have them replaced with clipboard content.": "varijabla za zamjenu sadržajem međuspremnika.",
+	"Verify Connection": "",
 	"Version": "Verzija",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/hu-HU/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Alapértelmezett prompt javaslatok",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Alapértelmezett felhasználói szerep",
 	"Delete": "Törlés",
 	"Delete a model": "Modell törlése",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Add hozzá a `--api` kapcsolót a stable-diffusion-webui futtatásakor",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Információ",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Beviteli parancsok",
 	"Install from Github URL": "Telepítés Github URL-ről",
 	"Instant Auto-Send After Voice Transcription": "Azonnali automatikus küldés hangfelismerés után",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Előző 30 nap",
 	"Previous 7 days": "Előző 7 nap",
+	"Private": "",
 	"Profile Image": "Profilkép",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (pl. Mondj egy érdekes tényt a Római Birodalomról)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Promptok",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" letöltése az Ollama.com-ról",
 	"Pull a model from Ollama.com": "Modell letöltése az Ollama.com-ról",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Téma",
 	"Thinking...": "Gondolkodik...",
 	"This action cannot be undone. Do you wish to continue?": "Ez a művelet nem vonható vissza. Szeretné folytatni?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ez biztosítja, hogy értékes beszélgetései biztonságosan mentésre kerüljenek a backend adatbázisban. Köszönjük!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ez egy kísérleti funkció, lehet, hogy nem a várt módon működik és bármikor változhat.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Szelepek sikeresen frissítve",
 	"variable": "változó",
 	"variable to have them replaced with clipboard content.": "változó, hogy a vágólap tartalmával helyettesítse őket.",
+	"Verify Connection": "",
 	"Version": "Verzió",
 	"Version {{selectedVersion}} of {{totalVersions}}": "{{selectedVersion}}. verzió a {{totalVersions}}-ból",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/id-ID/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Saran Permintaan Default",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Peran Pengguna Default",
 	"Delete": "Menghapus",
 	"Delete a model": "Menghapus model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `--api` saat menjalankan stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Perintah masukan",
 	"Install from Github URL": "Instal dari URL Github",
 	"Instant Auto-Send After Voice Transcription": "Kirim Otomatis Instan Setelah Transkripsi Suara",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "30 hari sebelumnya",
 	"Previous 7 days": "7 hari sebelumnya",
+	"Private": "",
 	"Profile Image": "Gambar Profil",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Permintaan (mis. Ceritakan sebuah fakta menarik tentang Kekaisaran Romawi)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompt",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Tarik \"{{searchValue}}\" dari Ollama.com",
 	"Pull a model from Ollama.com": "Tarik model dari Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Berpikir",
 	"This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak dapat dibatalkan. Apakah Anda ingin melanjutkan?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahwa percakapan Anda yang berharga disimpan dengan aman ke basis data backend. Terima kasih!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ini adalah fitur eksperimental, mungkin tidak berfungsi seperti yang diharapkan dan dapat berubah sewaktu-waktu.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Katup berhasil diperbarui",
 	"variable": "variabel",
 	"variable to have them replaced with clipboard content.": "variabel untuk diganti dengan konten papan klip.",
+	"Verify Connection": "",
 	"Version": "Versi",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/ie-GA/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Moltaí Leid Réamhshocraithe",
 	"Default to 389 or 636 if TLS is enabled": "Réamhshocrú go 389 nó 636 má tá TLS cumasaithe",
 	"Default to ALL": "Réamhshocrú do GACH",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Ról Úsáideora Réamhshocraithe",
 	"Delete": "Scrios",
 	"Delete a model": "Scrios múnla",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Cuir bratach `--api` san áireamh agus webui cobhsaí-scaipthe á rith",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Eolas",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Orduithe ionchuir",
 	"Install from Github URL": "Suiteáil ó Github URL",
 	"Instant Auto-Send After Voice Transcription": "Seoladh Uathoibríoch Láithreach Tar éis",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "Pionós Láithreacht",
 	"Previous 30 days": "30 lá roimhe seo",
 	"Previous 7 days": "7 lá roimhe seo",
+	"Private": "",
 	"Profile Image": "Íomhá Próifíl",
 	"Prompt": "Leid",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Leid (m.sh. inis dom fíric spraíúil faoin Impireacht Rómhánach)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "D'éirigh leis an leid a nuashonrú",
 	"Prompts": "Leabhair",
 	"Prompts Access": "Rochtain ar Chuirí",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Tarraing \"{{searchValue}}\" ó Ollama.com",
 	"Pull a model from Ollama.com": "Tarraing múnla ó Ollama.com",
 	"Query Generation Prompt": "Cuirí Ginearáil Ceisteanna",
@@ -1009,6 +1013,7 @@
 	"Theme": "Téama",
 	"Thinking...": "Ag smaoineamh...",
 	"This action cannot be undone. Do you wish to continue?": "Ní féidir an gníomh seo a chur ar ais. Ar mhaith leat leanúint ar aghaidh?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cinntíonn sé seo go sábhálfar do chomhráite luachmhara go daingean i do bhunachar sonraí cúltaca Go raibh maith agat!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Is gné turgnamhach í seo, b'fhéidir nach bhfeidhmeoidh sé mar a bhíothas ag súil leis agus tá sé faoi réir athraithe ag am ar bith.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Comhlaí nuashonraíodh",
 	"variable": "athraitheach",
 	"variable to have them replaced with clipboard content.": "athróg chun ábhar gearrthaisce a chur in ionad iad.",
+	"Verify Connection": "",
 	"Version": "Leagan",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Leagan {{selectedVersion}} de {{totalVersions}}",
 	"View Replies": "Féach ar Fhreagraí",

+ 6 - 0
src/lib/i18n/locales/it-IT/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Suggerimenti prompt predefiniti",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Ruolo utente predefinito",
 	"Delete": "Elimina",
 	"Delete a model": "Elimina un modello",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Includi il flag `--api` quando esegui stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informazioni",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Comandi di input",
 	"Install from Github URL": "Eseguire l'installazione dall'URL di Github",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Ultimi 30 giorni",
 	"Previous 7 days": "Ultimi 7 giorni",
+	"Private": "",
 	"Profile Image": "Immagine del profilo",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (ad esempio Dimmi un fatto divertente sull'Impero Romano)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompt",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Estrai \"{{searchValue}}\" da Ollama.com",
 	"Pull a model from Ollama.com": "Estrai un modello da Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ciò garantisce che le tue preziose conversazioni siano salvate in modo sicuro nel tuo database backend. Grazie!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "variabile",
 	"variable to have them replaced with clipboard content.": "variabile per farli sostituire con il contenuto degli appunti.",
+	"Verify Connection": "",
 	"Version": "Versione",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/ja-JP/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "デフォルトのプロンプトの提案",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "デフォルトのユーザー役割",
 	"Delete": "削除",
 	"Delete a model": "モデルを削除",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webuiを実行する際に`--api`フラグを含める",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "情報",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "入力コマンド",
 	"Install from Github URL": "Github URLからインストール",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "前の30日間",
 	"Previous 7 days": "前の7日間",
+	"Private": "",
 	"Profile Image": "プロフィール画像",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "プロンプト(例:ローマ帝国についての楽しい事を教えてください)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "プロンプト",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com から \"{{searchValue}}\" をプル",
 	"Pull a model from Ollama.com": "Ollama.com からモデルをプル",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "テーマ",
 	"Thinking...": "思考中...",
 	"This action cannot be undone. Do you wish to continue?": "このアクションは取り消し不可です。続けますか?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "これは、貴重な会話がバックエンドデータベースに安全に保存されることを保証します。ありがとうございます!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "実験的機能であり正常動作しない場合があります。",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "変数",
 	"variable to have them replaced with clipboard content.": "クリップボードの内容に置き換える変数。",
+	"Verify Connection": "",
 	"Version": "バージョン",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/ka-GE/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "ნაგულისხმევი მოთხოვნის მინიშნებები",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "მომხმარებლის ნაგულისხმევი როლი",
 	"Delete": "წაშლა",
 	"Delete a model": "მოდელის წაშლა",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "`--api` ალმის ჩასმა stable-diffusion-webui-ის გამოყენებისას",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "ინფორმაცია",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "შეიყვანეთ ბრძანებები",
 	"Install from Github URL": "დაყენება Github-ის ბმულიდან",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "წინა 30 დღე",
 	"Previous 7 days": "წინა 7 დღე",
+	"Private": "",
 	"Profile Image": "პროფილის სურათი",
 	"Prompt": "ბრძანების შეყვანის შეხსენება",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (მაგ. მითხარი სახალისო ფაქტი რომის იმპერიის შესახებ)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "მოთხოვნები",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\"-ის გადმოწერა Ollama.com-იდან",
 	"Pull a model from Ollama.com": "მოდელის გადმოწერა Ollama.com-დან",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "თემა",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ეს უზრუნველყოფს, რომ თქვენი ღირებული საუბრები უსაფრთხოდ შეინახება თქვენს უკანაბოლო მონაცემთა ბაზაში. მადლობა!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "ცვლადი",
 	"variable to have them replaced with clipboard content.": "ცვლადი მისი ბუფერის მნიშვნელობით ჩასანაცვლებლად.",
+	"Verify Connection": "",
 	"Version": "ვერსია",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/ko-KR/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "기본 프롬프트 제안",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "기본 사용자 역할",
 	"Delete": "삭제",
 	"Delete a model": "모델 삭제",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui를 실행 시 `--api` 플래그를 포함하세요",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "정보",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "명령어 입력",
 	"Install from Github URL": "Github URL에서 설치",
 	"Instant Auto-Send After Voice Transcription": "음성 변환 후 즉시 자동 전송",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "이전 30일",
 	"Previous 7 days": "이전 7일",
+	"Private": "",
 	"Profile Image": "프로필 이미지",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "프롬프트 (예: 로마 황제에 대해 재미있는 사실을 알려주세요)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "성공적으로 프롬프트를 수정했습니다",
 	"Prompts": "프롬프트",
 	"Prompts Access": "프롬프트 접근",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com에서 \"{{searchValue}}\" 가져오기",
 	"Pull a model from Ollama.com": "Ollama.com에서 모델 가져오기(pull)",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "테마",
 	"Thinking...": "생각 중...",
 	"This action cannot be undone. Do you wish to continue?": "이 액션은 되돌릴 수 없습니다. 계속 하시겠습니까?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "이렇게 하면 소중한 대화 내용이 백엔드 데이터베이스에 안전하게 저장됩니다. 감사합니다!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "이것은 실험적 기능으로, 예상대로 작동하지 않을 수 있으며 언제든지 변경될 수 있습니다.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "성공적으로 밸브가 업데이트되었습니다",
 	"variable": "변수",
 	"variable to have them replaced with clipboard content.": "변수를 사용하여 클립보드 내용으로 바꾸세요.",
+	"Verify Connection": "",
 	"Version": "버전",
 	"Version {{selectedVersion}} of {{totalVersions}}": "버전 {{totalVersions}}의 {{selectedVersion}}",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/lt-LT/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Numatytieji užklausų pasiūlymai",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Numatytoji naudotojo rolė",
 	"Delete": "ištrinti",
 	"Delete a model": "Ištrinti modėlį",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Pridėti `--api` kai vykdomas stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informacija",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Įvesties komandos",
 	"Install from Github URL": "Instaliuoti Github nuorodą",
 	"Instant Auto-Send After Voice Transcription": "Siųsti iškart po balso transkripcijos",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Paskutinės 30 dienų",
 	"Previous 7 days": "Paskutinės 7 dienos",
+	"Private": "",
 	"Profile Image": "Profilio nuotrauka",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Užklausa (pvz. supaprastink šį laišką)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Užklausos",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Rasti \"{{searchValue}}\" iš Ollama.com",
 	"Pull a model from Ollama.com": "Gauti modelį iš Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Mąsto...",
 	"This action cannot be undone. Do you wish to continue?": "Šis veiksmas negali būti atšauktas. Ar norite tęsti?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tai užtikrina, kad Jūsų pokalbiai saugiai saugojami duomenų bazėje. Ačiū!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tai eksperimentinė funkcija ir gali veikti nevisada.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Įeitys atnaujintos sėkmingai",
 	"variable": "kintamasis",
 	"variable to have them replaced with clipboard content.": "kintamoji pakeičiama kopijuoklės turiniu.",
+	"Verify Connection": "",
 	"Version": "Versija",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/ms-MY/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Cadangan Gesaan Lalai",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Peranan Pengguna Lalai",
 	"Delete": "Padam",
 	"Delete a model": "Padam Model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `-- api ` semasa menjalankan stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Maklumat",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Masukkan Arahan",
 	"Install from Github URL": "Pasang daripada URL Github",
 	"Instant Auto-Send After Voice Transcription": "Hantar Secara Automatik Dengan Segera Selepas Transkripsi Suara",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "30 hari sebelumnya",
 	"Previous 7 days": "7 hari sebelumnya",
+	"Private": "",
 	"Profile Image": "Imej Profail",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Gesaan (cth Beritahu saya fakta yang menyeronokkan tentang Kesultanan Melaka)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Gesaan",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Tarik \"{{ searchValue }}\" daripada Ollama.com",
 	"Pull a model from Ollama.com": "Tarik model dari Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Berfikir...",
 	"This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak boleh diubah semula kepada asal. Adakah anda ingin teruskan",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahawa perbualan berharga anda disimpan dengan selamat ke pangkalan data 'backend' anda. Terima kasih!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "ni adalah ciri percubaan, ia mungkin tidak berfungsi seperti yang diharapkan dan tertakluk kepada perubahan pada bila-bila masa.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "'Valves' berjaya dikemaskini",
 	"variable": "pembolehubah",
 	"variable to have them replaced with clipboard content.": "pembolehubah untuk ia digantikan dengan kandungan papan klip.",
+	"Verify Connection": "",
 	"Version": "Versi",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/nb-NO/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Standard forslag til ledetekster",
 	"Default to 389 or 636 if TLS is enabled": "Velg 389 eller 636 som standard hvis TLS er aktivert",
 	"Default to ALL": "Velg ALL som standard",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Standard brukerrolle",
 	"Delete": "Slett",
 	"Delete a model": "Slett en modell",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inkluder flagget --api når du kjører stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Inntast kommandoer",
 	"Install from Github URL": "Installer fra GitHub-URL",
 	"Instant Auto-Send After Voice Transcription": "Øyeblikkelig automatisk sending etter taletranskripsjon",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "Straff for opptreden",
 	"Previous 30 days": "Siste 30 dager",
 	"Previous 7 days": "Siste 7 dager",
+	"Private": "",
 	"Profile Image": "Profilbilde",
 	"Prompt": "Ledetekst",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Ledetekst (f.eks. Fortell meg noe morsomt om romerriket)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Ledetekst oppdatert",
 	"Prompts": "Ledetekster",
 	"Prompts Access": "Tilgang til ledetekster",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Hent {{searchValue}} fra Ollama.com",
 	"Pull a model from Ollama.com": "Hent en modell fra Ollama.com",
 	"Query Generation Prompt": "Ledetekst for genering av spørringer",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Tenker ...",
 	"This action cannot be undone. Do you wish to continue?": "Denne handlingen kan ikke angres. Vil du fortsette?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer at de verdifulle samtalene dine lagres sikkert i backend-databasen din. Takk!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentell funksjon. Det er mulig den ikke fungerer som forventet, og den kan endres når som helst.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Ventilene er oppdatert",
 	"variable": "variabel",
 	"variable to have them replaced with clipboard content.": "variabel for å erstatte dem med utklippstavleinnhold.",
+	"Verify Connection": "",
 	"Version": "Versjon",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} av {{totalVersions}}",
 	"View Replies": "Vis svar",

+ 6 - 0
src/lib/i18n/locales/nl-NL/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Standaard Prompt Suggesties",
 	"Default to 389 or 636 if TLS is enabled": "Standaard 389 of 636 als TLS is ingeschakeld",
 	"Default to ALL": "Standaar op ALL",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Standaard gebruikersrol",
 	"Delete": "Verwijderen",
 	"Delete a model": "Verwijder een model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Voeg `--api` vlag toe bij het uitvoeren van stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Voer commando's in",
 	"Install from Github URL": "Installeren vanaf Github-URL",
 	"Instant Auto-Send After Voice Transcription": "Direct automatisch verzenden na spraaktranscriptie",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Afgelopen 30 dagen",
 	"Previous 7 days": "Afgelopen 7 dagen",
+	"Private": "",
 	"Profile Image": "Profielafbeelding",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (bv. Vertel me een leuke gebeurtenis over het Romeinse Rijk)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Prompt succesvol bijgewerkt",
 	"Prompts": "Prompts",
 	"Prompts Access": "Prompttoegang",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Haal \"{{searchValue}}\" uit Ollama.com",
 	"Pull a model from Ollama.com": "Haal een model van Ollama.com",
 	"Query Generation Prompt": "Vraaggeneratieprompt",
@@ -1009,6 +1013,7 @@
 	"Theme": "Thema",
 	"Thinking...": "Aan het denken...",
 	"This action cannot be undone. Do you wish to continue?": "Deze actie kan niet ongedaan worden gemaakt. Wilt u doorgaan?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dit zorgt ervoor dat je waardevolle gesprekken veilig worden opgeslagen in je backend database. Dank je wel!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dit is een experimentele functie, het kan functioneren zoals verwacht en kan op elk moment veranderen.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Kleppen succesvol bijgewerkt",
 	"variable": "variabele",
 	"variable to have them replaced with clipboard content.": "variabele om ze te laten vervangen door klembord inhoud.",
+	"Verify Connection": "",
 	"Version": "Versie",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versie {{selectedVersion}} van {{totalVersions}}",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/pa-IN/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "ਮੂਲ ਪ੍ਰੰਪਟ ਸੁਝਾਅ",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "ਮੂਲ ਉਪਭੋਗਤਾ ਭੂਮਿਕਾ",
 	"Delete": "ਮਿਟਾਓ",
 	"Delete a model": "ਇੱਕ ਮਾਡਲ ਮਿਟਾਓ",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "ਸਟੇਬਲ-ਡਿਫਿਊਸ਼ਨ-ਵੈਬਯੂਆਈ ਚਲਾਉਣ ਸਮੇਂ `--api` ਝੰਡਾ ਸ਼ਾਮਲ ਕਰੋ",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "ਜਾਣਕਾਰੀ",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "ਇਨਪੁਟ ਕਮਾਂਡਾਂ",
 	"Install from Github URL": "Github URL ਤੋਂ ਇੰਸਟਾਲ ਕਰੋ",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "ਪਿਛਲੇ 30 ਦਿਨ",
 	"Previous 7 days": "ਪਿਛਲੇ 7 ਦਿਨ",
+	"Private": "",
 	"Profile Image": "ਪ੍ਰੋਫਾਈਲ ਚਿੱਤਰ",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "ਪ੍ਰੰਪਟ (ਉਦਾਹਰਣ ਲਈ ਮੈਨੂੰ ਰੋਮਨ ਸਾਮਰਾਜ ਬਾਰੇ ਇੱਕ ਮਜ਼ੇਦਾਰ ਤੱਥ ਦੱਸੋ)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "ਪ੍ਰੰਪਟ",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "ਓਲਾਮਾ.ਕਾਮ ਤੋਂ \"{{searchValue}}\" ਖਿੱਚੋ",
 	"Pull a model from Ollama.com": "ਓਲਾਮਾ.ਕਾਮ ਤੋਂ ਇੱਕ ਮਾਡਲ ਖਿੱਚੋ",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "ਥੀਮ",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ਇਹ ਯਕੀਨੀ ਬਣਾਉਂਦਾ ਹੈ ਕਿ ਤੁਹਾਡੀਆਂ ਕੀਮਤੀ ਗੱਲਾਂ ਤੁਹਾਡੇ ਬੈਕਐਂਡ ਡਾਟਾਬੇਸ ਵਿੱਚ ਸੁਰੱਖਿਅਤ ਤੌਰ 'ਤੇ ਸੰਭਾਲੀਆਂ ਗਈਆਂ ਹਨ। ਧੰਨਵਾਦ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "ਵੈਰੀਏਬਲ",
 	"variable to have them replaced with clipboard content.": "ਕਲਿੱਪਬੋਰਡ ਸਮੱਗਰੀ ਨਾਲ ਬਦਲਣ ਲਈ ਵੈਰੀਏਬਲ।",
+	"Verify Connection": "",
 	"Version": "ਵਰਜਨ",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/pl-PL/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Domyślne propozycje wpisów",
 	"Default to 389 or 636 if TLS is enabled": "Domyślnie użyj 389 lub 636, jeśli TLS jest włączony",
 	"Default to ALL": "Domyślne dla wszystkich",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Domyślna rola użytkownika",
 	"Delete": "Usuń",
 	"Delete a model": "Usuń model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Użyj flagi `--api` podczas uruchamiania stable-diffusion-webui.",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informacje",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Wprowadź polecenia",
 	"Install from Github URL": "Instalacja z adresu URL serwisu Github",
 	"Instant Auto-Send After Voice Transcription": "Automatyczne natychmiastowe wysyłanie po transkrypcji głosowej",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "Kara za obecność",
 	"Previous 30 days": "Ostatnie 30 dni",
 	"Previous 7 days": "Ostatnie 7 dni",
+	"Private": "",
 	"Profile Image": "Zdjęcie profilowe",
 	"Prompt": "Wprowadź podpowiedź: ",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (np. podaj ciekawostkę o Imperium Rzymskim)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Podpowiedź została zaktualizowana pomyślnie.",
 	"Prompts": "Podpowiedzi",
 	"Prompts Access": "Dostęp do podpowiedzi",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Pobierz \"{{searchValue}}\" z Ollama.com",
 	"Pull a model from Ollama.com": "Pobierz model z Ollama.com",
 	"Query Generation Prompt": "Podpowiedź do generowania zapytań",
@@ -1009,6 +1013,7 @@
 	"Theme": "Motyw",
 	"Thinking...": "Myślę...",
 	"This action cannot be undone. Do you wish to continue?": "Czy na pewno chcesz kontynuować? Ta akcja nie może zostać cofnięta.",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To gwarantuje, że Twoje wartościowe rozmowy są bezpiecznie zapisywane w bazie danych backendowej. Dziękujemy!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "To jest funkcja eksperymentalna, może nie działać zgodnie z oczekiwaniami i jest podatna na zmiany w dowolnym momencie.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Zawory zaktualizowane pomyślnie",
 	"variable": "zmienna",
 	"variable to have them replaced with clipboard content.": "Zmienna, która ma zostać zastąpiona zawartością schowka.",
+	"Verify Connection": "",
 	"Version": "Wersja",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Wersja {{selectedVersion}} z {{totalVersions}}",
 	"View Replies": "Wyświetl odpowiedzi",

+ 6 - 0
src/lib/i18n/locales/pt-BR/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Sugestões de Prompt Padrão",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "Padrão para TODOS",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Padrão para novos usuários",
 	"Delete": "Excluir",
 	"Delete a model": "Excluir um modelo",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Incluir a flag `--api` ao executar stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informação",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Comandos de entrada",
 	"Install from Github URL": "Instalar da URL do Github",
 	"Instant Auto-Send After Voice Transcription": "Envio Automático Instantâneo Após Transcrição de Voz",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Últimos 30 dias",
 	"Previous 7 days": "Últimos 7 dias",
+	"Private": "",
 	"Profile Image": "Imagem de Perfil",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (por exemplo, Diga-me um fato divertido sobre o Império Romano)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Prompt atualizado com sucesso",
 	"Prompts": "Prompts",
 	"Prompts Access": "Acessar prompts",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Obter \"{{searchValue}}\" de Ollama.com",
 	"Pull a model from Ollama.com": "Obter um modelo de Ollama.com",
 	"Query Generation Prompt": "Prompt de Geração de Consulta",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "Pensando...",
 	"This action cannot be undone. Do you wish to continue?": "Esta ação não pode ser desfeita. Você deseja continuar?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isso garante que suas conversas valiosas sejam salvas com segurança no banco de dados do backend. Obrigado!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta é uma funcionalidade experimental, pode não funcionar como esperado e está sujeita a alterações a qualquer momento.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Válvulas atualizadas com sucesso",
 	"variable": "variável",
 	"variable to have them replaced with clipboard content.": "variável para ser substituída pelo conteúdo da área de transferência.",
+	"Verify Connection": "",
 	"Version": "Versão",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versão {{selectedVersion}} de {{totalVersions}}",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/pt-PT/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Sugestões de Prompt Padrão",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Função de Utilizador Padrão",
 	"Delete": "Apagar",
 	"Delete a model": "Apagar um modelo",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inclua a flag `--api` ao executar stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informação",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Comandos de entrada",
 	"Install from Github URL": "Instalar a partir do URL do Github",
 	"Instant Auto-Send After Voice Transcription": "Enviar automaticamente depois da transcrição da voz",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Últimos 30 dias",
 	"Previous 7 days": "Últimos 7 dias",
+	"Private": "",
 	"Profile Image": "Imagem de Perfil",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (ex.: Dê-me um facto divertido sobre o Império Romano)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompts",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Puxar \"{{searchValue}}\" do Ollama.com",
 	"Pull a model from Ollama.com": "Puxar um modelo do Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Thinking...": "A pensar...",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isto garante que suas conversas valiosas sejam guardadas com segurança na sua base de dados de backend. Obrigado!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Isto é um recurso experimental, pode não funcionar conforme o esperado e está sujeito a alterações a qualquer momento.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"variable": "variável",
 	"variable to have them replaced with clipboard content.": "variável para que sejam substituídos pelo conteúdo da área de transferência.",
+	"Verify Connection": "",
 	"Version": "Versão",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/ro-RO/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Sugestii de Prompt Implicite",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Rolul Implicit al Utilizatorului",
 	"Delete": "Șterge",
 	"Delete a model": "Șterge un model",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Includeți flag-ul `--api` când rulați stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informații",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Comenzi de intrare",
 	"Install from Github URL": "Instalează de la URL-ul Github",
 	"Instant Auto-Send After Voice Transcription": "Trimitere Automată Instantanee După Transcrierea Vocii",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Ultimele 30 de zile",
 	"Previous 7 days": "Ultimele 7 zile",
+	"Private": "",
 	"Profile Image": "Imagine de Profil",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (de ex. Spune-mi un fapt amuzant despre Imperiul Roman)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompturi",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Extrage \"{{searchValue}}\" de pe Ollama.com",
 	"Pull a model from Ollama.com": "Extrage un model de pe Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Temă",
 	"Thinking...": "Gândește...",
 	"This action cannot be undone. Do you wish to continue?": "Această acțiune nu poate fi anulată. Doriți să continuați?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Acest lucru asigură că conversațiile dvs. valoroase sunt salvate în siguranță în baza de date a backend-ului dvs. Mulțumim!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aceasta este o funcție experimentală, poate să nu funcționeze așa cum vă așteptați și este supusă schimbării în orice moment.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Valve actualizate cu succes",
 	"variable": "variabilă",
 	"variable to have them replaced with clipboard content.": "variabilă pentru a fi înlocuite cu conținutul clipboard-ului.",
+	"Verify Connection": "",
 	"Version": "Versiune",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Versiunea {{selectedVersion}} din {{totalVersions}}",
 	"View Replies": "Vezi răspunsurile",

+ 6 - 0
src/lib/i18n/locales/ru-RU/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Предложения промптов по умолчанию",
 	"Default to 389 or 636 if TLS is enabled": "По умолчанию 389 или 636, если TLS включен.",
 	"Default to ALL": "По умолчанию ВСЕ",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Роль пользователя по умолчанию",
 	"Delete": "Удалить",
 	"Delete a model": "Удалить модель",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Добавьте флаг `--api` при запуске stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Информация",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Введите команды",
 	"Install from Github URL": "Установка с URL-адреса Github",
 	"Instant Auto-Send After Voice Transcription": "Мгновенная автоматическая отправка после расшифровки голоса",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Previous 30 days": "Предыдущие 30 дней",
 	"Previous 7 days": "Предыдущие 7 дней",
+	"Private": "",
 	"Profile Image": "Изображение профиля",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Промпт (например, Расскажи мне интересный факт о Римской империи)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Промпты",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Загрузить \"{{searchValue}}\" с Ollama.com",
 	"Pull a model from Ollama.com": "Загрузить модель с Ollama.com",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Тема",
 	"Thinking...": "Думаю...",
 	"This action cannot be undone. Do you wish to continue?": "Это действие нельзя отменить. Вы хотите продолжить?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Это обеспечивает сохранение ваших ценных разговоров в безопасной базе данных на вашем сервере. Спасибо!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Это экспериментальная функция, она может работать не так, как ожидалось, и может быть изменена в любое время.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Вентили успешно обновлены",
 	"variable": "переменная",
 	"variable to have them replaced with clipboard content.": "переменную, чтобы заменить их содержимым буфера обмена.",
+	"Verify Connection": "",
 	"Version": "Версия",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/sk-SK/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Predvolené návrhy promptov",
 	"Default Prompt Suggestions": "Predvolené návrhy promptov",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Predvolená rola užívateľa",
 	"Default User Role": "Predvolená rola užívateľa",
 	"Delete": "Odstrániť",
 	"Delete": "Odstrániť",
 	"Delete a model": "Odstrániť model.",
 	"Delete a model": "Odstrániť model.",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Pri spustení stable-diffusion-webui zahrňte príznak `--api`.",
 	"Include `--api` flag when running stable-diffusion-webui": "Pri spustení stable-diffusion-webui zahrňte príznak `--api`.",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Info": "Info",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Vstupné príkazy",
 	"Input commands": "Vstupné príkazy",
 	"Install from Github URL": "Inštalácia z URL adresy Githubu",
 	"Install from Github URL": "Inštalácia z URL adresy Githubu",
 	"Instant Auto-Send After Voice Transcription": "Okamžité automatické odoslanie po prepisu hlasu",
 	"Instant Auto-Send After Voice Transcription": "Okamžité automatické odoslanie po prepisu hlasu",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Predchádzajúcich 30 dní",
 	"Previous 30 days": "Predchádzajúcich 30 dní",
 	"Previous 7 days": "Predchádzajúcich 7 dní",
 	"Previous 7 days": "Predchádzajúcich 7 dní",
+	"Private": "",
 	"Profile Image": "Profilový obrázok",
 	"Profile Image": "Profilový obrázok",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (napr. Povedz mi zábavnú skutočnosť o Rímskej ríši)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (napr. Povedz mi zábavnú skutočnosť o Rímskej ríši)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Prompty",
 	"Prompts": "Prompty",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Stiahnite \"{{searchValue}}\" z Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Stiahnite \"{{searchValue}}\" z Ollama.com",
 	"Pull a model from Ollama.com": "Stiahnite model z Ollama.com",
 	"Pull a model from Ollama.com": "Stiahnite model z Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Téma",
 	"Theme": "Téma",
 	"Thinking...": "Premýšľam...",
 	"Thinking...": "Premýšľam...",
 	"This action cannot be undone. Do you wish to continue?": "Túto akciu nie je možné vrátiť späť. Prajete si pokračovať?",
 	"This action cannot be undone. Do you wish to continue?": "Túto akciu nie je možné vrátiť späť. Prajete si pokračovať?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Týmto je zaistené, že vaše cenné konverzácie sú bezpečne uložené vo vašej backendovej databáze. Ďakujeme!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Týmto je zaistené, že vaše cenné konverzácie sú bezpečne uložené vo vašej backendovej databáze. Ďakujeme!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Toto je experimentálna funkcia, nemusí fungovať podľa očakávania a môže byť kedykoľvek zmenená.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Toto je experimentálna funkcia, nemusí fungovať podľa očakávania a môže byť kedykoľvek zmenená.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Ventily boli úspešne aktualizované.",
 	"Valves updated successfully": "Ventily boli úspešne aktualizované.",
 	"variable": "premenná",
 	"variable": "premenná",
 	"variable to have them replaced with clipboard content.": "premennú, aby bol ich obsah nahradený obsahom schránky.",
 	"variable to have them replaced with clipboard content.": "premennú, aby bol ich obsah nahradený obsahom schránky.",
+	"Verify Connection": "",
 	"Version": "Verzia",
 	"Version": "Verzia",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Verzia {{selectedVersion}} z {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Verzia {{selectedVersion}} z {{totalVersions}}",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/sr-RS/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Подразумевани предлози упита",
 	"Default Prompt Suggestions": "Подразумевани предлози упита",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Подразумевана улога корисника",
 	"Default User Role": "Подразумевана улога корисника",
 	"Delete": "Обриши",
 	"Delete": "Обриши",
 	"Delete a model": "Обриши модел",
 	"Delete a model": "Обриши модел",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Укључи `--api` заставицу при покретању stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Укључи `--api` заставицу при покретању stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Инфо",
 	"Info": "Инфо",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Унеси наредбе",
 	"Input commands": "Унеси наредбе",
 	"Install from Github URL": "Инсталирај из Гитхуб УРЛ адресе",
 	"Install from Github URL": "Инсталирај из Гитхуб УРЛ адресе",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Претходних 30 дана",
 	"Previous 30 days": "Претходних 30 дана",
 	"Previous 7 days": "Претходних 7 дана",
 	"Previous 7 days": "Претходних 7 дана",
+	"Private": "",
 	"Profile Image": "Слика профила",
 	"Profile Image": "Слика профила",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Упит (нпр. „подели занимљивост о Римском царству“)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Упит (нпр. „подели занимљивост о Римском царству“)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Упит измењен успешно",
 	"Prompt updated successfully": "Упит измењен успешно",
 	"Prompts": "Упити",
 	"Prompts": "Упити",
 	"Prompts Access": "Приступ упитима",
 	"Prompts Access": "Приступ упитима",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Повуците \"{{searchValue}}\" са Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Повуците \"{{searchValue}}\" са Ollama.com",
 	"Pull a model from Ollama.com": "Повуците модел са Ollama.com",
 	"Pull a model from Ollama.com": "Повуците модел са Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Тема",
 	"Theme": "Тема",
 	"Thinking...": "Размишљам...",
 	"Thinking...": "Размишљам...",
 	"This action cannot be undone. Do you wish to continue?": "Ова радња се не може опозвати. Да ли желите наставити?",
 	"This action cannot be undone. Do you wish to continue?": "Ова радња се не може опозвати. Да ли желите наставити?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ово осигурава да су ваши вредни разговори безбедно сачувани у вашој бекенд бази података. Хвала вам!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ово осигурава да су ваши вредни разговори безбедно сачувани у вашој бекенд бази података. Хвала вам!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Вентили успешно ажурирани",
 	"Valves updated successfully": "Вентили успешно ажурирани",
 	"variable": "променљива",
 	"variable": "променљива",
 	"variable to have them replaced with clipboard content.": "променљива за замену са садржајем оставе.",
 	"variable to have them replaced with clipboard content.": "променљива за замену са садржајем оставе.",
+	"Verify Connection": "",
 	"Version": "Издање",
 	"Version": "Издање",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "Погледај одговоре",
 	"View Replies": "Погледај одговоре",

+ 6 - 0
src/lib/i18n/locales/sv-SE/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Standardinstruktionsförslag",
 	"Default Prompt Suggestions": "Standardinstruktionsförslag",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Standardanvändarroll",
 	"Default User Role": "Standardanvändarroll",
 	"Delete": "Radera",
 	"Delete": "Radera",
 	"Delete a model": "Ta bort en modell",
 	"Delete a model": "Ta bort en modell",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Inkludera flaggan `--api` när du kör stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inkludera flaggan `--api` när du kör stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Information",
 	"Info": "Information",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Indatakommandon",
 	"Input commands": "Indatakommandon",
 	"Install from Github URL": "Installera från Github-URL",
 	"Install from Github URL": "Installera från Github-URL",
 	"Instant Auto-Send After Voice Transcription": "Skicka automatiskt efter rösttranskribering",
 	"Instant Auto-Send After Voice Transcription": "Skicka automatiskt efter rösttranskribering",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Föregående 30 dagar",
 	"Previous 30 days": "Föregående 30 dagar",
 	"Previous 7 days": "Föregående 7 dagar",
 	"Previous 7 days": "Föregående 7 dagar",
+	"Private": "",
 	"Profile Image": "Profilbild",
 	"Profile Image": "Profilbild",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Instruktion (t.ex. Berätta en kuriosa om Romerska Imperiet)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Instruktion (t.ex. Berätta en kuriosa om Romerska Imperiet)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Instruktioner",
 	"Prompts": "Instruktioner",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ladda ner \"{{searchValue}}\" från Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ladda ner \"{{searchValue}}\" från Ollama.com",
 	"Pull a model from Ollama.com": "Ladda ner en modell från Ollama.com",
 	"Pull a model from Ollama.com": "Ladda ner en modell från Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Theme": "Tema",
 	"Thinking...": "Tänker...",
 	"Thinking...": "Tänker...",
 	"This action cannot be undone. Do you wish to continue?": "Denna åtgärd kan inte ångras. Vill du fortsätta?",
 	"This action cannot be undone. Do you wish to continue?": "Denna åtgärd kan inte ångras. Vill du fortsätta?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Detta säkerställer att dina värdefulla samtal sparas säkert till din backend-databas. Tack!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Detta säkerställer att dina värdefulla samtal sparas säkert till din backend-databas. Tack!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Detta är en experimentell funktion som kanske inte fungerar som förväntat och som kan komma att ändras när som helst.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Detta är en experimentell funktion som kanske inte fungerar som förväntat och som kan komma att ändras när som helst.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "variabel",
 	"variable": "variabel",
 	"variable to have them replaced with clipboard content.": "variabel för att få dem ersatta med urklippsinnehåll.",
 	"variable to have them replaced with clipboard content.": "variabel för att få dem ersatta med urklippsinnehåll.",
+	"Verify Connection": "",
 	"Version": "Version",
 	"Version": "Version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} av {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} av {{totalVersions}}",
 	"View Replies": "Se svar",
 	"View Replies": "Se svar",

+ 6 - 0
src/lib/i18n/locales/th-TH/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "คำแนะนำพรอมต์ค่าเริ่มต้น",
 	"Default Prompt Suggestions": "คำแนะนำพรอมต์ค่าเริ่มต้น",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "บทบาทผู้ใช้ค่าเริ่มต้น",
 	"Default User Role": "บทบาทผู้ใช้ค่าเริ่มต้น",
 	"Delete": "ลบ",
 	"Delete": "ลบ",
 	"Delete a model": "ลบโมเดล",
 	"Delete a model": "ลบโมเดล",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "รวมแฟลก `--api` เมื่อเรียกใช้ stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "รวมแฟลก `--api` เมื่อเรียกใช้ stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "ข้อมูล",
 	"Info": "ข้อมูล",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "คำสั่งป้อนข้อมูล",
 	"Input commands": "คำสั่งป้อนข้อมูล",
 	"Install from Github URL": "ติดตั้งจาก URL ของ Github",
 	"Install from Github URL": "ติดตั้งจาก URL ของ Github",
 	"Instant Auto-Send After Voice Transcription": "ส่งอัตโนมัติทันทีหลังจากการถอดเสียง",
 	"Instant Auto-Send After Voice Transcription": "ส่งอัตโนมัติทันทีหลังจากการถอดเสียง",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "30 วันที่ผ่านมา",
 	"Previous 30 days": "30 วันที่ผ่านมา",
 	"Previous 7 days": "7 วันที่ผ่านมา",
 	"Previous 7 days": "7 วันที่ผ่านมา",
+	"Private": "",
 	"Profile Image": "รูปโปรไฟล์",
 	"Profile Image": "รูปโปรไฟล์",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "พรอมต์ (เช่น บอกข้อเท็จจริงที่น่าสนุกเกี่ยวกับจักรวรรดิโรมัน)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "พรอมต์ (เช่น บอกข้อเท็จจริงที่น่าสนุกเกี่ยวกับจักรวรรดิโรมัน)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "พรอมต์",
 	"Prompts": "พรอมต์",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "ธีม",
 	"Theme": "ธีม",
 	"Thinking...": "กำลังคิด...",
 	"Thinking...": "กำลังคิด...",
 	"This action cannot be undone. Do you wish to continue?": "การกระทำนี้ไม่สามารถย้อนกลับได้ คุณต้องการดำเนินการต่อหรือไม่?",
 	"This action cannot be undone. Do you wish to continue?": "การกระทำนี้ไม่สามารถย้อนกลับได้ คุณต้องการดำเนินการต่อหรือไม่?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "สิ่งนี้ทำให้มั่นใจได้ว่าการสนทนาที่มีค่าของคุณจะถูกบันทึกอย่างปลอดภัยในฐานข้อมูลแบ็กเอนด์ของคุณ ขอบคุณ!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "สิ่งนี้ทำให้มั่นใจได้ว่าการสนทนาที่มีค่าของคุณจะถูกบันทึกอย่างปลอดภัยในฐานข้อมูลแบ็กเอนด์ของคุณ ขอบคุณ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "นี่เป็นฟีเจอร์ทดลอง อาจไม่ทำงานตามที่คาดไว้และอาจมีการเปลี่ยนแปลงได้ตลอดเวลา",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "นี่เป็นฟีเจอร์ทดลอง อาจไม่ทำงานตามที่คาดไว้และอาจมีการเปลี่ยนแปลงได้ตลอดเวลา",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "อัปเดตวาล์วเรียบร้อยแล้ว",
 	"Valves updated successfully": "อัปเดตวาล์วเรียบร้อยแล้ว",
 	"variable": "ตัวแปร",
 	"variable": "ตัวแปร",
 	"variable to have them replaced with clipboard content.": "ตัวแปรเพื่อให้แทนที่ด้วยเนื้อหาคลิปบอร์ด",
 	"variable to have them replaced with clipboard content.": "ตัวแปรเพื่อให้แทนที่ด้วยเนื้อหาคลิปบอร์ด",
+	"Verify Connection": "",
 	"Version": "เวอร์ชัน",
 	"Version": "เวอร์ชัน",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/tk-TW/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "",
 	"Default Prompt Suggestions": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "",
 	"Default User Role": "",
 	"Delete": "",
 	"Delete": "",
 	"Delete a model": "",
 	"Delete a model": "",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Info": "",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "",
 	"Input commands": "",
 	"Install from Github URL": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
 	"Instant Auto-Send After Voice Transcription": "",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "",
 	"Previous 30 days": "",
 	"Previous 7 days": "",
 	"Previous 7 days": "",
+	"Private": "",
 	"Profile Image": "",
 	"Profile Image": "",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "",
 	"Prompts": "",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "",
 	"Theme": "",
 	"Thinking...": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This action cannot be undone. Do you wish to continue?": "",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "",
 	"Valves updated successfully": "",
 	"variable": "",
 	"variable": "",
 	"variable to have them replaced with clipboard content.": "",
 	"variable to have them replaced with clipboard content.": "",
+	"Verify Connection": "",
 	"Version": "",
 	"Version": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/tr-TR/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Varsayılan Prompt Önerileri",
 	"Default Prompt Suggestions": "Varsayılan Prompt Önerileri",
 	"Default to 389 or 636 if TLS is enabled": "TLS etkinse 389 veya 636'ya varsayılan olarak",
 	"Default to 389 or 636 if TLS is enabled": "TLS etkinse 389 veya 636'ya varsayılan olarak",
 	"Default to ALL": "TÜMÜ'nü varsayılan olarak",
 	"Default to ALL": "TÜMÜ'nü varsayılan olarak",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Varsayılan Kullanıcı Rolü",
 	"Default User Role": "Varsayılan Kullanıcı Rolü",
 	"Delete": "Sil",
 	"Delete": "Sil",
 	"Delete a model": "Bir modeli sil",
 	"Delete a model": "Bir modeli sil",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui çalıştırılırken `--api` bayrağını dahil edin",
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui çalıştırılırken `--api` bayrağını dahil edin",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Bilgi",
 	"Info": "Bilgi",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Giriş komutları",
 	"Input commands": "Giriş komutları",
 	"Install from Github URL": "Github URL'sinden yükleyin",
 	"Install from Github URL": "Github URL'sinden yükleyin",
 	"Instant Auto-Send After Voice Transcription": "Ses Transkripsiyonundan Sonra Anında Otomatik Gönder",
 	"Instant Auto-Send After Voice Transcription": "Ses Transkripsiyonundan Sonra Anında Otomatik Gönder",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Önceki 30 gün",
 	"Previous 30 days": "Önceki 30 gün",
 	"Previous 7 days": "Önceki 7 gün",
 	"Previous 7 days": "Önceki 7 gün",
+	"Private": "",
 	"Profile Image": "Profil Fotoğrafı",
 	"Profile Image": "Profil Fotoğrafı",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (örn. Roma İmparatorluğu hakkında ilginç bir bilgi verin)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (örn. Roma İmparatorluğu hakkında ilginç bir bilgi verin)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Prompt başarıyla güncellendi",
 	"Prompt updated successfully": "Prompt başarıyla güncellendi",
 	"Prompts": "Promptlar",
 	"Prompts": "Promptlar",
 	"Prompts Access": "Promptlara Erişim",
 	"Prompts Access": "Promptlara Erişim",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com'dan \"{{searchValue}}\" çekin",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com'dan \"{{searchValue}}\" çekin",
 	"Pull a model from Ollama.com": "Ollama.com'dan bir model çekin",
 	"Pull a model from Ollama.com": "Ollama.com'dan bir model çekin",
 	"Query Generation Prompt": "Sorgu Oluşturma Promptu",
 	"Query Generation Prompt": "Sorgu Oluşturma Promptu",
@@ -1009,6 +1013,7 @@
 	"Theme": "Tema",
 	"Theme": "Tema",
 	"Thinking...": "Düşünüyor...",
 	"Thinking...": "Düşünüyor...",
 	"This action cannot be undone. Do you wish to continue?": "Bu eylem geri alınamaz. Devam etmek istiyor musunuz?",
 	"This action cannot be undone. Do you wish to continue?": "Bu eylem geri alınamaz. Devam etmek istiyor musunuz?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Bu, önemli konuşmalarınızın güvenli bir şekilde arkayüz veritabanınıza kaydedildiğini garantiler. Teşekkür ederiz!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Bu, önemli konuşmalarınızın güvenli bir şekilde arkayüz veritabanınıza kaydedildiğini garantiler. Teşekkür ederiz!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Bu deneysel bir özelliktir, beklendiği gibi çalışmayabilir ve her an değişiklik yapılabilir.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Bu deneysel bir özelliktir, beklendiği gibi çalışmayabilir ve her an değişiklik yapılabilir.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Valvler başarıyla güncellendi",
 	"Valves updated successfully": "Valvler başarıyla güncellendi",
 	"variable": "değişken",
 	"variable": "değişken",
 	"variable to have them replaced with clipboard content.": "panodaki içerikle değiştirilmesi için değişken.",
 	"variable to have them replaced with clipboard content.": "panodaki içerikle değiştirilmesi için değişken.",
+	"Verify Connection": "",
 	"Version": "Sürüm",
 	"Version": "Sürüm",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Sürüm {{selectedVersion}} / {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Sürüm {{selectedVersion}} / {{totalVersions}}",
 	"View Replies": "Yanıtları Görüntüle",
 	"View Replies": "Yanıtları Görüntüle",

+ 6 - 0
src/lib/i18n/locales/uk-UA/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Пропозиції промтів замовчуванням",
 	"Default Prompt Suggestions": "Пропозиції промтів замовчуванням",
 	"Default to 389 or 636 if TLS is enabled": "За замовчуванням використовується 389 або 636, якщо TLS увімкнено.",
 	"Default to 389 or 636 if TLS is enabled": "За замовчуванням використовується 389 або 636, якщо TLS увімкнено.",
 	"Default to ALL": "За замовчуванням — ВСІ.",
 	"Default to ALL": "За замовчуванням — ВСІ.",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Роль користувача за замовчуванням",
 	"Default User Role": "Роль користувача за замовчуванням",
 	"Delete": "Видалити",
 	"Delete": "Видалити",
 	"Delete a model": "Видалити модель",
 	"Delete a model": "Видалити модель",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Включіть прапор `--api` при запуску stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Включіть прапор `--api` при запуску stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Інфо",
 	"Info": "Інфо",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Команди вводу",
 	"Input commands": "Команди вводу",
 	"Install from Github URL": "Встановіть з URL-адреси Github",
 	"Install from Github URL": "Встановіть з URL-адреси Github",
 	"Instant Auto-Send After Voice Transcription": "Миттєва автоматична відправка після транскрипції голосу",
 	"Instant Auto-Send After Voice Transcription": "Миттєва автоматична відправка після транскрипції голосу",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "Попередні 30 днів",
 	"Previous 30 days": "Попередні 30 днів",
 	"Previous 7 days": "Попередні 7 днів",
 	"Previous 7 days": "Попередні 7 днів",
+	"Private": "",
 	"Profile Image": "Зображення профілю",
 	"Profile Image": "Зображення профілю",
 	"Prompt": "Підказка",
 	"Prompt": "Підказка",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Підказка (напр., розкажіть мені цікавий факт про Римську імперію)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Підказка (напр., розкажіть мені цікавий факт про Римську імперію)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "Підказку успішно оновлено",
 	"Prompt updated successfully": "Підказку успішно оновлено",
 	"Prompts": "Промти",
 	"Prompts": "Промти",
 	"Prompts Access": "Доступ до підказок",
 	"Prompts Access": "Доступ до підказок",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Завантажити \"{{searchValue}}\" з Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Завантажити \"{{searchValue}}\" з Ollama.com",
 	"Pull a model from Ollama.com": "Завантажити модель з Ollama.com",
 	"Pull a model from Ollama.com": "Завантажити модель з Ollama.com",
 	"Query Generation Prompt": "Підказка для генерації запиту",
 	"Query Generation Prompt": "Підказка для генерації запиту",
@@ -1009,6 +1013,7 @@
 	"Theme": "Тема",
 	"Theme": "Тема",
 	"Thinking...": "Думаю...",
 	"Thinking...": "Думаю...",
 	"This action cannot be undone. Do you wish to continue?": "Цю дію не можна скасувати. Ви бажаєте продовжити?",
 	"This action cannot be undone. Do you wish to continue?": "Цю дію не можна скасувати. Ви бажаєте продовжити?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Це забезпечує збереження ваших цінних розмов у безпечному бекенд-сховищі. Дякуємо!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Це забезпечує збереження ваших цінних розмов у безпечному бекенд-сховищі. Дякуємо!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Це експериментальна функція, вона може працювати не так, як очікувалося, і може бути змінена в будь-який час.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Це експериментальна функція, вона може працювати не так, як очікувалося, і може бути змінена в будь-який час.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Клапани успішно оновлено",
 	"Valves updated successfully": "Клапани успішно оновлено",
 	"variable": "змінна",
 	"variable": "змінна",
 	"variable to have them replaced with clipboard content.": "змінна, щоб замінити їх вмістом буфера обміну.",
 	"variable to have them replaced with clipboard content.": "змінна, щоб замінити їх вмістом буфера обміну.",
+	"Verify Connection": "",
 	"Version": "Версія",
 	"Version": "Версія",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Версія {{selectedVersion}} з {{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "Версія {{selectedVersion}} з {{totalVersions}}",
 	"View Replies": "Переглянути відповіді",
 	"View Replies": "Переглянути відповіді",

+ 6 - 0
src/lib/i18n/locales/ur-PK/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "ڈیفالٹ پرامپٹ تجاویز",
 	"Default Prompt Suggestions": "ڈیفالٹ پرامپٹ تجاویز",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "ڈیفالٹ صارف کا کردار",
 	"Default User Role": "ڈیفالٹ صارف کا کردار",
 	"Delete": "حذف کریں",
 	"Delete": "حذف کریں",
 	"Delete a model": "ایک ماڈل حذف کریں",
 	"Delete a model": "ایک ماڈل حذف کریں",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "اسٹیبل-ڈیفیوژن-ویب یو آئی چلانے کے دوران `--api` فلیگ شامل کریں",
 	"Include `--api` flag when running stable-diffusion-webui": "اسٹیبل-ڈیفیوژن-ویب یو آئی چلانے کے دوران `--api` فلیگ شامل کریں",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "معلومات",
 	"Info": "معلومات",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "کمانڈز داخل کریں",
 	"Input commands": "کمانڈز داخل کریں",
 	"Install from Github URL": "گِٹ حب یو آر ایل سے انسٹال کریں",
 	"Install from Github URL": "گِٹ حب یو آر ایل سے انسٹال کریں",
 	"Instant Auto-Send After Voice Transcription": "آواز کی نقل کے بعد فوری خودکار بھیجنا",
 	"Instant Auto-Send After Voice Transcription": "آواز کی نقل کے بعد فوری خودکار بھیجنا",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "پچھلے 30 دن",
 	"Previous 30 days": "پچھلے 30 دن",
 	"Previous 7 days": "پچھلے 7 دن",
 	"Previous 7 days": "پچھلے 7 دن",
+	"Private": "",
 	"Profile Image": "پروفائل تصویر",
 	"Profile Image": "پروفائل تصویر",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "سوال کریں (مثلاً: مجھے رومن سلطنت کے بارے میں کوئی دلچسپ حقیقت بتائیں)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "سوال کریں (مثلاً: مجھے رومن سلطنت کے بارے میں کوئی دلچسپ حقیقت بتائیں)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "پرومپٹس",
 	"Prompts": "پرومپٹس",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com سے \"{{searchValue}}\" کو کھینچیں",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com سے \"{{searchValue}}\" کو کھینچیں",
 	"Pull a model from Ollama.com": "Ollama.com سے ماڈل حاصل کریں",
 	"Pull a model from Ollama.com": "Ollama.com سے ماڈل حاصل کریں",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "تھیم",
 	"Theme": "تھیم",
 	"Thinking...": "سوچ رہا ہے...",
 	"Thinking...": "سوچ رہا ہے...",
 	"This action cannot be undone. Do you wish to continue?": "یہ عمل واپس نہیں کیا جا سکتا کیا آپ جاری رکھنا چاہتے ہیں؟",
 	"This action cannot be undone. Do you wish to continue?": "یہ عمل واپس نہیں کیا جا سکتا کیا آپ جاری رکھنا چاہتے ہیں؟",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "یہ یقینی بناتا ہے کہ آپ کی قیمتی گفتگو محفوظ طریقے سے آپ کے بیک اینڈ ڈیٹا بیس میں محفوظ کی گئی ہیں شکریہ!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "یہ یقینی بناتا ہے کہ آپ کی قیمتی گفتگو محفوظ طریقے سے آپ کے بیک اینڈ ڈیٹا بیس میں محفوظ کی گئی ہیں شکریہ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "یہ ایک تجرباتی خصوصیت ہے، یہ متوقع طور پر کام نہ کر سکتی ہو اور کسی بھی وقت تبدیل کی جا سکتی ہے",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "یہ ایک تجرباتی خصوصیت ہے، یہ متوقع طور پر کام نہ کر سکتی ہو اور کسی بھی وقت تبدیل کی جا سکتی ہے",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "والو کامیابی کے ساتھ اپ ڈیٹ ہو گئے",
 	"Valves updated successfully": "والو کامیابی کے ساتھ اپ ڈیٹ ہو گئے",
 	"variable": "متغیر",
 	"variable": "متغیر",
 	"variable to have them replaced with clipboard content.": "انہیں کلپ بورڈ کے مواد سے تبدیل کرنے کے لیے متغیر",
 	"variable to have them replaced with clipboard content.": "انہیں کلپ بورڈ کے مواد سے تبدیل کرنے کے لیے متغیر",
+	"Verify Connection": "",
 	"Version": "ورژن",
 	"Version": "ورژن",
 	"Version {{selectedVersion}} of {{totalVersions}}": "ورژن {{selectedVersion}} کا {{totalVersions}} میں سے",
 	"Version {{selectedVersion}} of {{totalVersions}}": "ورژن {{selectedVersion}} کا {{totalVersions}} میں سے",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/vi-VN/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "Đề xuất prompt mặc định",
 	"Default Prompt Suggestions": "Đề xuất prompt mặc định",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to 389 or 636 if TLS is enabled": "",
 	"Default to ALL": "",
 	"Default to ALL": "",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "Vai trò mặc định",
 	"Default User Role": "Vai trò mặc định",
 	"Delete": "Xóa",
 	"Delete": "Xóa",
 	"Delete a model": "Xóa mô hình",
 	"Delete a model": "Xóa mô hình",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "Bao gồm flag `--api` khi chạy stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Bao gồm flag `--api` khi chạy stable-diffusion-webui",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Thông tin",
 	"Info": "Thông tin",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "Nhập các câu lệnh",
 	"Input commands": "Nhập các câu lệnh",
 	"Install from Github URL": "Cài đặt từ Github URL",
 	"Install from Github URL": "Cài đặt từ Github URL",
 	"Instant Auto-Send After Voice Transcription": "Tự động gửi ngay lập tức sau khi phiên dịch giọng nói",
 	"Instant Auto-Send After Voice Transcription": "Tự động gửi ngay lập tức sau khi phiên dịch giọng nói",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "",
 	"Presence Penalty": "",
 	"Previous 30 days": "30 ngày trước",
 	"Previous 30 days": "30 ngày trước",
 	"Previous 7 days": "7 ngày trước",
 	"Previous 7 days": "7 ngày trước",
+	"Private": "",
 	"Profile Image": "Ảnh đại diện",
 	"Profile Image": "Ảnh đại diện",
 	"Prompt": "",
 	"Prompt": "",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (ví dụ: Hãy kể cho tôi một sự thật thú vị về Đế chế La Mã)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (ví dụ: Hãy kể cho tôi một sự thật thú vị về Đế chế La Mã)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "",
 	"Prompt updated successfully": "",
 	"Prompts": "Prompt",
 	"Prompts": "Prompt",
 	"Prompts Access": "",
 	"Prompts Access": "",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Tải \"{{searchValue}}\" từ Ollama.com",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Tải \"{{searchValue}}\" từ Ollama.com",
 	"Pull a model from Ollama.com": "Tải mô hình từ Ollama.com",
 	"Pull a model from Ollama.com": "Tải mô hình từ Ollama.com",
 	"Query Generation Prompt": "",
 	"Query Generation Prompt": "",
@@ -1009,6 +1013,7 @@
 	"Theme": "Chủ đề",
 	"Theme": "Chủ đề",
 	"Thinking...": "Đang suy luận...",
 	"Thinking...": "Đang suy luận...",
 	"This action cannot be undone. Do you wish to continue?": "Hành động này không thể được hoàn tác. Bạn có muốn tiếp tục không?",
 	"This action cannot be undone. Do you wish to continue?": "Hành động này không thể được hoàn tác. Bạn có muốn tiếp tục không?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Điều này đảm bảo rằng các nội dung chat có giá trị của bạn được lưu an toàn vào cơ sở dữ liệu backend của bạn. Cảm ơn bạn!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Điều này đảm bảo rằng các nội dung chat có giá trị của bạn được lưu an toàn vào cơ sở dữ liệu backend của bạn. Cảm ơn bạn!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Đây là tính năng thử nghiệm, có thể không hoạt động như mong đợi và có thể thay đổi bất kỳ lúc nào.",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Đây là tính năng thử nghiệm, có thể không hoạt động như mong đợi và có thể thay đổi bất kỳ lúc nào.",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "Đã cập nhật Valves thành công",
 	"Valves updated successfully": "Đã cập nhật Valves thành công",
 	"variable": "biến",
 	"variable": "biến",
 	"variable to have them replaced with clipboard content.": "biến để có chúng được thay thế bằng nội dung clipboard.",
 	"variable to have them replaced with clipboard content.": "biến để có chúng được thay thế bằng nội dung clipboard.",
+	"Verify Connection": "",
 	"Version": "Version",
 	"Version": "Version",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"Version {{selectedVersion}} of {{totalVersions}}": "",
 	"View Replies": "",
 	"View Replies": "",

+ 6 - 0
src/lib/i18n/locales/zh-CN/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "默认提示词建议",
 	"Default Prompt Suggestions": "默认提示词建议",
 	"Default to 389 or 636 if TLS is enabled": "如果启用 TLS,则默认为 389 或 636",
 	"Default to 389 or 636 if TLS is enabled": "如果启用 TLS,则默认为 389 或 636",
 	"Default to ALL": "默认为 ALL",
 	"Default to ALL": "默认为 ALL",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "默认进行分段检索以提取重点和相关内容 (推荐)",
 	"Default User Role": "默认用户角色",
 	"Default User Role": "默认用户角色",
 	"Delete": "删除",
 	"Delete": "删除",
 	"Delete a model": "删除一个模型",
 	"Delete a model": "删除一个模型",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api` 参数",
 	"Include `--api` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api` 参数",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影响算法对生成文本反馈的响应速度。较低的学习率将导致调整更慢,而较高的学习率将使算法反应更灵敏。",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影响算法对生成文本反馈的响应速度。较低的学习率将导致调整更慢,而较高的学习率将使算法反应更灵敏。",
 	"Info": "信息",
 	"Info": "信息",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "注入整个内容作为上下文进行综合处理,适用于复杂查询",
 	"Input commands": "输入命令",
 	"Input commands": "输入命令",
 	"Install from Github URL": "从 Github URL 安装",
 	"Install from Github URL": "从 Github URL 安装",
 	"Instant Auto-Send After Voice Transcription": "语音转录文字后即时自动发送",
 	"Instant Auto-Send After Voice Transcription": "语音转录文字后即时自动发送",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "重复惩罚(Presence Penalty)",
 	"Presence Penalty": "重复惩罚(Presence Penalty)",
 	"Previous 30 days": "过去 30 天",
 	"Previous 30 days": "过去 30 天",
 	"Previous 7 days": "过去 7 天",
 	"Previous 7 days": "过去 7 天",
+	"Private": "私有",
 	"Profile Image": "用户头像",
 	"Profile Image": "用户头像",
 	"Prompt": "提示词 (Prompt)",
 	"Prompt": "提示词 (Prompt)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示(例如:给我讲一个关于罗马帝国的趣事。)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示(例如:给我讲一个关于罗马帝国的趣事。)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "提示词更新成功",
 	"Prompt updated successfully": "提示词更新成功",
 	"Prompts": "提示词",
 	"Prompts": "提示词",
 	"Prompts Access": "访问提示词",
 	"Prompts Access": "访问提示词",
+	"Public": "公共",
 	"Pull \"{{searchValue}}\" from Ollama.com": "从 Ollama.com 拉取 \"{{searchValue}}\"",
 	"Pull \"{{searchValue}}\" from Ollama.com": "从 Ollama.com 拉取 \"{{searchValue}}\"",
 	"Pull a model from Ollama.com": "从 Ollama.com 拉取一个模型",
 	"Pull a model from Ollama.com": "从 Ollama.com 拉取一个模型",
 	"Query Generation Prompt": "查询生成提示词",
 	"Query Generation Prompt": "查询生成提示词",
@@ -1009,6 +1013,7 @@
 	"Theme": "主题",
 	"Theme": "主题",
 	"Thinking...": "正在思考...",
 	"Thinking...": "正在思考...",
 	"This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?",
 	"This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "此频道创建于{{createdAt}},这里是{{channelName}}频道的开始",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此选项控制刷新上下文时保留多少 Token。例如,如果设置为 2,则将保留对话上下文的最后 2 个 Token。保留上下文有助于保持对话的连续性,但可能会降低响应新主题的能力。",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此选项控制刷新上下文时保留多少 Token。例如,如果设置为 2,则将保留对话上下文的最后 2 个 Token。保留上下文有助于保持对话的连续性,但可能会降低响应新主题的能力。",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "值更新成功",
 	"Valves updated successfully": "值更新成功",
 	"variable": "变量",
 	"variable": "变量",
 	"variable to have them replaced with clipboard content.": "变量将被剪贴板内容替换。",
 	"variable to have them replaced with clipboard content.": "变量将被剪贴板内容替换。",
+	"Verify Connection": "验证连接",
 	"Version": "版本",
 	"Version": "版本",
 	"Version {{selectedVersion}} of {{totalVersions}}": "版本 {{selectedVersion}}/{{totalVersions}}",
 	"Version {{selectedVersion}} of {{totalVersions}}": "版本 {{selectedVersion}}/{{totalVersions}}",
 	"View Replies": "查看回复",
 	"View Replies": "查看回复",

+ 6 - 0
src/lib/i18n/locales/zh-TW/translation.json

@@ -270,6 +270,7 @@
 	"Default Prompt Suggestions": "預設提示詞建議",
 	"Default Prompt Suggestions": "預設提示詞建議",
 	"Default to 389 or 636 if TLS is enabled": "如果啓用了 TLS 則預設為 389 或 636",
 	"Default to 389 or 636 if TLS is enabled": "如果啓用了 TLS 則預設為 389 或 636",
 	"Default to ALL": "預設到所有",
 	"Default to ALL": "預設到所有",
+	"Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "",
 	"Default User Role": "預設使用者角色",
 	"Default User Role": "預設使用者角色",
 	"Delete": "刪除",
 	"Delete": "刪除",
 	"Delete a model": "刪除模型",
 	"Delete a model": "刪除模型",
@@ -583,6 +584,7 @@
 	"Include `--api` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api` 參數",
 	"Include `--api` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api` 參數",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影響算法對生成文本回饋的反應速度。較低的學習率會導致調整速度較慢,而較高的學習率會使算法反應更靈敏。",
 	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影響算法對生成文本回饋的反應速度。較低的學習率會導致調整速度較慢,而較高的學習率會使算法反應更靈敏。",
 	"Info": "資訊",
 	"Info": "資訊",
+	"Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "",
 	"Input commands": "輸入命令",
 	"Input commands": "輸入命令",
 	"Install from Github URL": "從 GitHub URL 安裝",
 	"Install from Github URL": "從 GitHub URL 安裝",
 	"Instant Auto-Send After Voice Transcription": "語音轉錄後立即自動傳送",
 	"Instant Auto-Send After Voice Transcription": "語音轉錄後立即自動傳送",
@@ -806,6 +808,7 @@
 	"Presence Penalty": "在場懲罰",
 	"Presence Penalty": "在場懲罰",
 	"Previous 30 days": "過去 30 天",
 	"Previous 30 days": "過去 30 天",
 	"Previous 7 days": "過去 7 天",
 	"Previous 7 days": "過去 7 天",
+	"Private": "",
 	"Profile Image": "個人檔案圖片",
 	"Profile Image": "個人檔案圖片",
 	"Prompt": "提示詞",
 	"Prompt": "提示詞",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示詞(例如:告訴我關於羅馬帝國的一些趣事)",
 	"Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示詞(例如:告訴我關於羅馬帝國的一些趣事)",
@@ -815,6 +818,7 @@
 	"Prompt updated successfully": "提示詞更新成功",
 	"Prompt updated successfully": "提示詞更新成功",
 	"Prompts": "提示詞",
 	"Prompts": "提示詞",
 	"Prompts Access": "提示詞存取",
 	"Prompts Access": "提示詞存取",
+	"Public": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "從 Ollama.com 下載「{{searchValue}}」",
 	"Pull \"{{searchValue}}\" from Ollama.com": "從 Ollama.com 下載「{{searchValue}}」",
 	"Pull a model from Ollama.com": "從 Ollama.com 下載模型",
 	"Pull a model from Ollama.com": "從 Ollama.com 下載模型",
 	"Query Generation Prompt": "查詢生成提示詞",
 	"Query Generation Prompt": "查詢生成提示詞",
@@ -1009,6 +1013,7 @@
 	"Theme": "主題",
 	"Theme": "主題",
 	"Thinking...": "正在思考...",
 	"Thinking...": "正在思考...",
 	"This action cannot be undone. Do you wish to continue?": "此操作無法復原。您確定要繼續進行嗎?",
 	"This action cannot be undone. Do you wish to continue?": "此操作無法復原。您確定要繼續進行嗎?",
+	"This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "這確保您寶貴的對話會安全地儲存到您的後端資料庫。謝謝!",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "這確保您寶貴的對話會安全地儲存到您的後端資料庫。謝謝!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "這是一個實驗性功能,它可能無法如預期運作,並且可能會隨時變更。",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "這是一個實驗性功能,它可能無法如預期運作,並且可能會隨時變更。",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此選項控制在刷新上下文時保留多少 token。例如,如果設定為 2,則會保留對話上下文的最後 2 個 token。保留上下文有助於保持對話的連貫性,但也可能降低對新主題的回應能力。",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此選項控制在刷新上下文時保留多少 token。例如,如果設定為 2,則會保留對話上下文的最後 2 個 token。保留上下文有助於保持對話的連貫性,但也可能降低對新主題的回應能力。",
@@ -1118,6 +1123,7 @@
 	"Valves updated successfully": "閥門更新成功",
 	"Valves updated successfully": "閥門更新成功",
 	"variable": "變數",
 	"variable": "變數",
 	"variable to have them replaced with clipboard content.": "變數,以便將其替換為剪貼簿內容。",
 	"variable to have them replaced with clipboard content.": "變數,以便將其替換為剪貼簿內容。",
+	"Verify Connection": "",
 	"Version": "版本",
 	"Version": "版本",
 	"Version {{selectedVersion}} of {{totalVersions}}": "第 {{selectedVersion}} 版,共 {{totalVersions}} 版",
 	"Version {{selectedVersion}} of {{totalVersions}}": "第 {{selectedVersion}} 版,共 {{totalVersions}} 版",
 	"View Replies": "檢視回覆",
 	"View Replies": "檢視回覆",