chore: format

Timothy Jaeryang Baek 2 weeks ago
commit 4c65728443
53 changed files with 158 additions and 160 deletions
  1. + 2 - 4  src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte
  2. + 3 - 3  src/lib/i18n/locales/ar-BH/translation.json
  3. + 3 - 3  src/lib/i18n/locales/ar/translation.json
  4. + 3 - 3  src/lib/i18n/locales/bg-BG/translation.json
  5. + 3 - 3  src/lib/i18n/locales/bn-BD/translation.json
  6. + 3 - 3  src/lib/i18n/locales/bo-TB/translation.json
  7. + 3 - 3  src/lib/i18n/locales/ca-ES/translation.json
  8. + 3 - 3  src/lib/i18n/locales/ceb-PH/translation.json
  9. + 3 - 3  src/lib/i18n/locales/cs-CZ/translation.json
  10. + 3 - 3  src/lib/i18n/locales/da-DK/translation.json
  11. + 3 - 3  src/lib/i18n/locales/de-DE/translation.json
  12. + 3 - 3  src/lib/i18n/locales/dg-DG/translation.json
  13. + 3 - 3  src/lib/i18n/locales/el-GR/translation.json
  14. + 3 - 3  src/lib/i18n/locales/en-GB/translation.json
  15. + 3 - 3  src/lib/i18n/locales/en-US/translation.json
  16. + 3 - 3  src/lib/i18n/locales/es-ES/translation.json
  17. + 3 - 3  src/lib/i18n/locales/et-EE/translation.json
  18. + 3 - 3  src/lib/i18n/locales/eu-ES/translation.json
  19. + 3 - 3  src/lib/i18n/locales/fa-IR/translation.json
  20. + 3 - 3  src/lib/i18n/locales/fi-FI/translation.json
  21. + 3 - 3  src/lib/i18n/locales/fr-CA/translation.json
  22. + 3 - 3  src/lib/i18n/locales/fr-FR/translation.json
  23. + 3 - 3  src/lib/i18n/locales/he-IL/translation.json
  24. + 3 - 3  src/lib/i18n/locales/hi-IN/translation.json
  25. + 3 - 3  src/lib/i18n/locales/hr-HR/translation.json
  26. + 3 - 3  src/lib/i18n/locales/hu-HU/translation.json
  27. + 3 - 3  src/lib/i18n/locales/id-ID/translation.json
  28. + 3 - 3  src/lib/i18n/locales/ie-GA/translation.json
  29. + 3 - 3  src/lib/i18n/locales/it-IT/translation.json
  30. + 3 - 3  src/lib/i18n/locales/ja-JP/translation.json
  31. + 3 - 3  src/lib/i18n/locales/ka-GE/translation.json
  32. + 3 - 3  src/lib/i18n/locales/ko-KR/translation.json
  33. + 3 - 3  src/lib/i18n/locales/lt-LT/translation.json
  34. + 3 - 3  src/lib/i18n/locales/ms-MY/translation.json
  35. + 3 - 3  src/lib/i18n/locales/nb-NO/translation.json
  36. + 3 - 3  src/lib/i18n/locales/nl-NL/translation.json
  37. + 3 - 3  src/lib/i18n/locales/pa-IN/translation.json
  38. + 3 - 3  src/lib/i18n/locales/pl-PL/translation.json
  39. + 3 - 3  src/lib/i18n/locales/pt-BR/translation.json
  40. + 3 - 3  src/lib/i18n/locales/pt-PT/translation.json
  41. + 3 - 3  src/lib/i18n/locales/ro-RO/translation.json
  42. + 3 - 3  src/lib/i18n/locales/ru-RU/translation.json
  43. + 3 - 3  src/lib/i18n/locales/sk-SK/translation.json
  44. + 3 - 3  src/lib/i18n/locales/sr-RS/translation.json
  45. + 3 - 3  src/lib/i18n/locales/sv-SE/translation.json
  46. + 3 - 3  src/lib/i18n/locales/th-TH/translation.json
  47. + 3 - 3  src/lib/i18n/locales/tk-TW/translation.json
  48. + 3 - 3  src/lib/i18n/locales/tr-TR/translation.json
  49. + 3 - 3  src/lib/i18n/locales/uk-UA/translation.json
  50. + 3 - 3  src/lib/i18n/locales/ur-PK/translation.json
  51. + 3 - 3  src/lib/i18n/locales/vi-VN/translation.json
  52. + 3 - 3  src/lib/i18n/locales/zh-CN/translation.json
  53. + 3 - 3  src/lib/i18n/locales/zh-TW/translation.json

+ 2 - 4
src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte

@@ -1099,7 +1099,7 @@
 	<div class=" py-0.5 w-full justify-between">
 		<Tooltip
 			content={$i18n.t(
-				'This option enables or disables the use of the Ollama Think feature, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.'
+				'This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.'
 			)}
 			placement="top-start"
 			className="inline-tooltip"
@@ -1129,9 +1129,7 @@
 
 	<div class=" py-0.5 w-full justify-between">
 		<Tooltip
-			content={$i18n.t(
-				'This option enables or disables the use of the Ollama Think feature, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.'
-			)}
+			content={$i18n.t('The format to return a response in. Format can be json or a JSON schema.')}
 			placement="top-start"
 			className="inline-tooltip"
 		>

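The tooltips edited above describe three Ollama request options: the reasoning toggle (`think`), the response `format` ("json" or a JSON schema), and `keep_alive`. As a rough illustration only, here is a minimal TypeScript sketch of an Ollama `/api/chat` call using those fields; the localhost URL, model name, and schema shown are assumptions for the example, not part of this repository.

```ts
// Minimal sketch of an Ollama /api/chat request exercising the options the
// tooltips describe: think, format, and keep_alive.
// The URL, model name, and schema below are illustrative assumptions.
async function main() {
	const res = await fetch('http://localhost:11434/api/chat', {
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
		body: JSON.stringify({
			model: 'llama3.1',
			messages: [{ role: 'user', content: 'Name three primary colors.' }],
			think: true, // let the model reason before answering (supported models only)
			// format accepts the string "json" or a JSON schema object
			format: {
				type: 'object',
				properties: { colors: { type: 'array', items: { type: 'string' } } },
				required: ['colors']
			},
			keep_alive: '5m', // how long the model stays loaded in memory after the request
			stream: false
		})
	});
	const data = await res.json();
	console.log(data.message.content);
}

main().catch(console.error);
```
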
+ 3 - 3
src/lib/i18n/locales/ar-BH/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "تاق غير صالحة",
 	"is typing...": "",
 	"January": "يناير",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT تجريبي",
 	"JWT Token": "JWT Token",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Keep Alive",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "اختصارات لوحة المفاتيح",
@@ -1023,7 +1021,6 @@
 	"Rename": "إعادة تسمية",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "وضع الطلب",
 	"Reranking Engine": "",
 	"Reranking Model": "إعادة تقييم النموذج",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. شكرًا لك!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/ar/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "تنسيق ملف غير صالح.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "تاق غير صالحة",
 	"is typing...": "يكتب...",
 	"January": "يناير",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT تجريبي",
 	"JWT Token": "JWT Token",
 	"Kagi Search API Key": "مفتاح API لـ Kagi Search",
-	"Keep Alive": "Keep Alive",
 	"Keep in Sidebar": "",
 	"Key": "المفتاح",
 	"Keyboard shortcuts": "اختصارات لوحة المفاتيح",
@@ -1023,7 +1021,6 @@
 	"Rename": "إعادة تسمية",
 	"Reorder Models": "إعادة ترتيب النماذج",
 	"Reply in Thread": "الرد داخل سلسلة الرسائل",
-	"Request Mode": "وضع الطلب",
 	"Reranking Engine": "",
 	"Reranking Model": "إعادة تقييم النموذج",
 	"Reset": "إعادة تعيين",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "يحدد حجم الدفعة عدد طلبات النصوص التي تتم معالجتها معًا. الحجم الأكبر يمكن أن يزيد الأداء والسرعة، ولكنه يحتاج أيضًا إلى ذاكرة أكبر.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "المطورون خلف هذا المكون الإضافي هم متطوعون شغوفون من المجتمع. إذا وجدت هذا المكون مفيدًا، فكر في المساهمة في تطويره.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "قائمة التقييم تعتمد على نظام Elo ويتم تحديثها في الوقت الفعلي.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "السمة LDAP التي تتوافق مع البريد الإلكتروني الذي يستخدمه المستخدمون لتسجيل الدخول.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "السمة LDAP التي تتوافق مع اسم المستخدم الذي يستخدمه المستخدمون لتسجيل الدخول.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. شكرًا لك!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "هذه ميزة تجريبية، وقد لا تعمل كما هو متوقع وقد تتغير في أي وقت.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "هذا الخيار يحدد عدد الرموز التي يتم الاحتفاظ بها عند تحديث السياق. مثلاً، إذا تم ضبطه على 2، سيتم الاحتفاظ بآخر رمزين من السياق. الحفاظ على السياق يساعد في استمرارية المحادثة، لكنه قد يحد من التفاعل مع مواضيع جديدة.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "يحدد هذا الخيار الحد الأقصى لعدد الرموز التي يمكن للنموذج توليدها في الرد. زيادته تتيح للنموذج تقديم إجابات أطول، لكنها قد تزيد من احتمالية توليد محتوى غير مفيد أو غير ذي صلة.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "سيؤدي هذا الخيار إلى حذف جميع الملفات الحالية في المجموعة واستبدالها بالملفات التي تم تحميلها حديثًا.",
 	"This response was generated by \"{{model}}\"": "تم توليد هذا الرد بواسطة \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/bg-BG/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Невалиден формат на файла.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Невалиден таг",
 	"is typing...": "пише...",
 	"January": "Януари",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT изтичане",
 	"JWT Token": "JWT токен",
 	"Kagi Search API Key": "API ключ за Kagi Search",
-	"Keep Alive": "Поддържай активен",
 	"Keep in Sidebar": "",
 	"Key": "Ключ",
 	"Keyboard shortcuts": "Клавиши за бърз достъп",
@@ -1023,7 +1021,6 @@
 	"Rename": "Преименуване",
 	"Reorder Models": "Преорганизиране на моделите",
 	"Reply in Thread": "Отговори в тред",
-	"Request Mode": "Режим на заявка",
 	"Reranking Engine": "Двигател за пренареждане",
 	"Reranking Model": "Модел за преподреждане",
 	"Reset": "Нулиране",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Разработчиците зад този плъгин са страстни доброволци от общността. Ако намирате този плъгин полезен, моля, обмислете да допринесете за неговото развитие.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Класацията за оценка се базира на рейтинговата система Elo и се обновява в реално време.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP атрибутът, който съответства на имейла, който потребителите използват за вписване.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "LDAP атрибутът, който съответства на потребителското име, което потребителите използват за вписване.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Това гарантира, че ценните ви разговори се запазват сигурно във вашата бекенд база данни. Благодарим ви!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Това е експериментална функция, може да не работи според очакванията и подлежи на промяна по всяко време.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Тази опция ще изтрие всички съществуващи файлове в колекцията и ще ги замени с новокачени файлове.",
 	"This response was generated by \"{{model}}\"": "Този отговор беше генериран от \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/bn-BD/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "অবৈধ ট্যাগ",
 	"is typing...": "",
 	"January": "জানুয়ারী",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT-র মেয়াদ",
 	"JWT Token": "JWT টোকেন",
 	"Kagi Search API Key": "",
-	"Keep Alive": "সচল রাখুন",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "কিবোর্ড শর্টকাটসমূহ",
@@ -1023,7 +1021,6 @@
 	"Rename": "রেনেম",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "রিকোয়েস্ট মোড",
 	"Reranking Engine": "",
 	"Reranking Model": "রির্যাক্টিং মডেল",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "এটা নিশ্চিত করে যে, আপনার গুরুত্বপূর্ণ আলোচনা নিরাপদে আপনার ব্যাকএন্ড ডেটাবেজে সংরক্ষিত আছে। ধন্যবাদ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/bo-TB/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "ཡིག་ཆའི་བཀོད་པ་ནུས་མེད།",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "JSON schema ནུས་མེད།",
 	"Invalid Tag": "རྟགས་ནུས་མེད།",
 	"is typing...": "ཡིག་འབྲུ་རྒྱག་བཞིན་པ།...",
 	"January": "ཟླ་བ་དང་པོ།",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT དུས་ཚོད་རྫོགས་པ།",
 	"JWT Token": "JWT Token",
 	"Kagi Search API Key": "Kagi Search API ལྡེ་མིག",
-	"Keep Alive": "གསོན་པོར་གནས་པ།",
 	"Keep in Sidebar": "",
 	"Key": "ལྡེ་མིག",
 	"Keyboard shortcuts": "མཐེབ་གནོན་མྱུར་ལམ།",
@@ -1023,7 +1021,6 @@
 	"Rename": "མིང་བསྐྱར་འདོགས།",
 	"Reorder Models": "དཔེ་དབྱིབས་བསྐྱར་སྒྲིག",
 	"Reply in Thread": "བརྗོད་གཞིའི་ནང་ལན་འདེབས།",
-	"Request Mode": "རེ་ཞུའི་མ་དཔེ།",
 	"Reranking Engine": "",
 	"Reranking Model": "བསྐྱར་སྒྲིག་དཔེ་དབྱིབས།",
 	"Reset": "སླར་སྒྲིག",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "ཚན་ཆུང་གི་ཆེ་ཆུང་གིས་ཡིག་རྐྱང་རེ་ཞུ་ག་ཚོད་མཉམ་དུ་ཐེངས་གཅིག་ལ་སྒྲུབ་དགོས་གཏན་འཁེལ་བྱེད། ཚན་ཆུང་ཆེ་བ་ཡིས་དཔེ་དབྱིབས་ཀྱི་ལས་ཆོད་དང་མྱུར་ཚད་མང་དུ་གཏོང་ཐུབ། འོན་ཀྱང་དེས་དྲན་ཤེས་མང་བ་དགོས།",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "plugin འདིའི་རྒྱབ་ཀྱི་གསར་སྤེལ་བ་དག་ནི་སྤྱི་ཚོགས་ནས་ཡིན་པའི་སེམས་ཤུགས་ཅན་གྱི་དང་བླངས་པ་ཡིན། གལ་ཏེ་ཁྱེད་ཀྱིས་ plugin འདི་ཕན་ཐོགས་ཡོད་པ་མཐོང་ན། དེའི་གསར་སྤེལ་ལ་ཞལ་འདེབས་གནང་བར་བསམ་ཞིབ་གནང་རོགས།",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "གདེང་འཇོག་འགྲན་རེས་རེའུ་མིག་དེ་ Elo སྐར་མ་སྤྲོད་པའི་མ་ལག་ལ་གཞི་བཅོལ་ཡོད། དེ་མིན་དུས་ཐོག་ཏུ་གསར་སྒྱུར་བྱེད་ཀྱི་ཡོད།",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "བེད་སྤྱོད་མཁན་ཚོས་ནང་འཛུལ་བྱེད་སྐབས་བེད་སྤྱོད་གཏོང་བའི་ཡིག་ཟམ་ལ་སྦྲེལ་བའི་ LDAP ཁྱད་ཆོས།",
 	"The LDAP attribute that maps to the username that users use to sign in.": "བེད་སྤྱོད་མཁན་ཚོས་ནང་འཛུལ་བྱེད་སྐབས་བེད་སྤྱོད་གཏོང་བའི་བེད་སྤྱོད་མིང་ལ་སྦྲེལ་བའི་ LDAP ཁྱད་ཆོས།",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "འདིས་ཁྱེད་ཀྱི་རྩ་ཆེའི་ཁ་བརྡ་དག་བདེ་འཇགས་ངང་ཁྱེད་ཀྱི་རྒྱབ་སྣེ་གནས་ཚུལ་མཛོད་དུ་ཉར་ཚགས་བྱེད་པ་ཁག་ཐེག་བྱེད། ཐུགས་རྗེ་ཆེ།",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "འདི་ནི་ཚོད་ལྟའི་རང་བཞིན་གྱི་ཁྱད་ཆོས་ཤིག་ཡིན། དེ་རེ་སྒུག་ལྟར་ལས་ཀ་བྱེད་མི་སྲིད། དེ་མིན་དུས་ཚོད་གང་རུང་ལ་འགྱུར་བ་འགྲོ་སྲིད།",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "འདེམས་ཀ་འདིས་ནང་དོན་གསར་སྒྱུར་བྱེད་སྐབས་ཊོཀ་ཀེན་ག་ཚོད་ཉར་ཚགས་བྱེད་དགོས་ཚོད་འཛིན་བྱེད། དཔེར་ན། གལ་ཏེ་ ༢ ལ་བཀོད་སྒྲིག་བྱས་ན། ཁ་བརྡའི་ནང་དོན་གྱི་ཊོཀ་ཀེན་མཐའ་མ་ ༢ ཉར་ཚགས་བྱེད་ངེས། ནང་དོན་ཉར་ཚགས་བྱས་ན་ཁ་བརྡའི་རྒྱུན་མཐུད་རང་བཞིན་རྒྱུན་སྲུང་བྱེད་པར་རོགས་པ་བྱེད་ཐུབ། འོན་ཀྱང་དེས་བརྗོད་གཞི་གསར་པར་ལན་འདེབས་བྱེད་པའི་ནུས་པ་ཉུང་དུ་གཏོང་སྲིད།",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "འདེམས་ཀ་འདིས་དཔེ་དབྱིབས་ཀྱིས་དེའི་ལན་ནང་བཟོ་ཐུབ་པའི་ཊོཀ་ཀེན་གྱི་གྲངས་མང་ཤོས་འཇོག་པ། ཚད་བཀག་འདི་མང་དུ་བཏང་ན་དཔེ་དབྱིབས་ཀྱིས་ལན་རིང་བ་སྤྲོད་པར་གནང་བ་སྤྲོད། འོན་ཀྱང་དེས་ཕན་ཐོགས་མེད་པའམ་འབྲེལ་མེད་ཀྱི་ནང་དོན་བཟོ་བའི་ཆགས་ཚུལ་མང་དུ་གཏོང་སྲིད།",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "འདེམས་ཀ་འདིས་བསྡུ་གསོག་ནང་གི་ཡོད་པའི་ཡིག་ཆ་ཡོངས་རྫོགས་བསུབ་ནས་དེ་དག་གསར་དུ་སྤར་བའི་ཡིག་ཆས་ཚབ་བྱེད་ངེས།",
 	"This response was generated by \"{{model}}\"": "ལན་འདི་ \"{{model}}\" ཡིས་བཟོས་པ།",

+ 3 - 3
src/lib/i18n/locales/ca-ES/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "Continguts del fitxer no vàlids",
 	"Invalid file format.": "Format d'arxiu no vàlid.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Esquema JSON no vàlid",
 	"Invalid Tag": "Etiqueta no vàlida",
 	"is typing...": "està escrivint...",
 	"January": "Gener",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Caducitat del JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "Clau API de Kagi Search",
-	"Keep Alive": "Manté actiu",
 	"Keep in Sidebar": "",
 	"Key": "Clau",
 	"Keyboard shortcuts": "Dreceres de teclat",
@@ -1023,7 +1021,6 @@
 	"Rename": "Canviar el nom",
 	"Reorder Models": "Reordenar els models",
 	"Reply in Thread": "Respondre al fil",
-	"Request Mode": "Mode de sol·licitud",
 	"Reranking Engine": "Motor de valoració",
 	"Reranking Model": "Model de reavaluació",
 	"Reset": "Restableix",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "La mida del lot determina quantes sol·licituds de text es processen alhora. Una mida de lot més gran pot augmentar el rendiment i la velocitat del model, però també requereix més memòria.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Els desenvolupadors d'aquest complement són voluntaris apassionats de la comunitat. Si trobeu útil aquest complement, considereu contribuir al seu desenvolupament.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "La classificació d'avaluació es basa en el sistema de qualificació Elo i s'actualitza en temps real.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "L'atribut LDAP que s'associa al correu que els usuaris utilitzen per iniciar la sessió.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "L'atribut LDAP que mapeja el nom d'usuari amb l'usuari que vol iniciar sessió",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.",
 	"This model is not publicly available. Please select another model.": "Aquest model no està disponible públicament. Seleccioneu-ne un altre.",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Aquesta opció estableix el nombre màxim de tokens que el model pot generar en la seva resposta. Augmentar aquest límit permet que el model proporcioni respostes més llargues, però també pot augmentar la probabilitat que es generi contingut poc útil o irrellevant.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Aquesta opció eliminarà tots els fitxers existents de la col·lecció i els substituirà per fitxers recentment penjats.",
 	"This response was generated by \"{{model}}\"": "Aquesta resposta l'ha generat el model \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/ceb-PH/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "",
 	"is typing...": "",
 	"January": "",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Pag-expire sa JWT",
 	"JWT Token": "JWT token",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Padayon nga aktibo",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Mga shortcut sa keyboard",
@@ -1023,7 +1021,6 @@
 	"Rename": "",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Query mode",
 	"Reranking Engine": "",
 	"Reranking Model": "",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Kini nagsiguro nga ang imong bililhon nga mga panag-istoryahanay luwas nga natipig sa imong backend database. ",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/cs-CZ/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Neplatný formát souboru.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Neplatný tag",
 	"is typing...": "",
 	"January": "Leden",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Vypršení JWT",
 	"JWT Token": "JWT Token (JSON Web Token)",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Udržovat spojení",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Klávesové zkratky",
@@ -1023,7 +1021,6 @@
 	"Rename": "Přejmenovat",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Režim žádosti",
 	"Reranking Engine": "",
 	"Reranking Model": "Model pro přehodnocení pořadí",
 	"Reset": "režim Reset",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Vývojáři stojící za tímto pluginem jsou zapálení dobrovolníci z komunity. Pokud považujete tento plugin za užitečný, zvažte příspěvek k jeho vývoji.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hodnotící žebříček je založen na systému hodnocení Elo a je aktualizován v reálném čase.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To zajišťuje, že vaše cenné konverzace jsou bezpečně uloženy ve vaší backendové databázi. Děkujeme!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Jedná se o experimentální funkci, nemusí fungovat podle očekávání a může být kdykoliv změněna.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Tato volba odstraní všechny existující soubory ve sbírce a nahradí je nově nahranými soubory.",
 	"This response was generated by \"{{model}}\"": "Tato odpověď byla vygenerována pomocí \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/da-DK/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "Ugyldigt filindhold",
 	"Invalid file format.": "Ugyldigt filformat.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Ugyldigt JSON-schema",
 	"Invalid Tag": "Ugyldigt tag",
 	"is typing...": "er i gang med at skrive...",
 	"January": "Januar",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT-udløb",
 	"JWT Token": "JWT-token",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Hold i live",
 	"Keep in Sidebar": "",
 	"Key": "Nøgle",
 	"Keyboard shortcuts": "Tastaturgenveje",
@@ -1023,7 +1021,6 @@
 	"Rename": "Omdøb",
 	"Reorder Models": "Omarranger modeller",
 	"Reply in Thread": "Svar i tråd",
-	"Request Mode": "Forespørgselstilstand",
 	"Reranking Engine": "",
 	"Reranking Model": "Omarrangeringsmodel",
 	"Reset": "Nulstil",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Udviklerne bag dette plugin er passionerede frivillige fra fællesskabet. Hvis du finder dette plugin nyttigt, kan du overveje at bidrage til dets udvikling.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer, at dine værdifulde samtaler gemmes sikkert i din backend-database. Tak!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentel funktion, den fungerer muligvis ikke som forventet og kan ændres når som helst.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Denne indstilling sletter alle eksisterende filer i samlingen og erstatter dem med nyligt uploadede filer.",
 	"This response was generated by \"{{model}}\"": "Dette svar blev genereret af \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/de-DE/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "Ungültiger Dateiinhalt",
 	"Invalid file format.": "Ungültiges Dateiformat.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Ungültiges JSON-Schema",
 	"Invalid Tag": "Ungültiger Tag",
 	"is typing...": "schreibt ...",
 	"January": "Januar",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT-Ablauf",
 	"JWT Token": "JWT-Token",
 	"Kagi Search API Key": "Kagi Search API-Schlüssel",
-	"Keep Alive": "Verbindung aufrechterhalten",
 	"Keep in Sidebar": "",
 	"Key": "Schlüssel",
 	"Keyboard shortcuts": "Tastenkombinationen",
@@ -1023,7 +1021,6 @@
 	"Rename": "Umbenennen",
 	"Reorder Models": "Modelle neu anordnen",
 	"Reply in Thread": "Im Thread antworten",
-	"Request Mode": "Anforderungsmodus",
 	"Reranking Engine": "Reranking-Engine",
 	"Reranking Model": "Reranking-Modell",
 	"Reset": "Zurücksetzen",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Die Entwickler hinter diesem Plugin sind leidenschaftliche Freiwillige aus der Community. Wenn Sie dieses Plugin hilfreich finden, erwägen Sie bitte, zu seiner Entwicklung beizutragen.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Die Bewertungs-Bestenliste basiert auf dem Elo-Bewertungssystem und wird in Echtzeit aktualisiert.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Das LDAP-Attribut, das der Mail zugeordnet ist, die Benutzer zum Anmelden verwenden.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Das LDAP-Attribut, das dem Benutzernamen zugeordnet ist, den Benutzer zum Anmelden verwenden.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Chats sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.",
 	"This model is not publicly available. Please select another model.": "Dieses Modell ist nicht öffentlich verfügbar. Bitte wählen Sie ein anderes Modell aus.",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Diese Option steuert, wie viele Token beim Aktualisieren des Kontexts beibehalten werden. Wenn beispielsweise 2 eingestellt ist, werden die letzten 2 Tokens des Gesprächskontexts beibehalten. Das Beibehalten des Kontexts kann helfen, die Kontinuität eines Gesprächs zu wahren, kann aber die Fähigkeit, auf neue Themen zu reagieren, einschränken.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Diese Option legt die maximale Anzahl von Token fest, die das Modell in seiner Antwort generieren kann. Eine Erhöhung dieses Limits ermöglicht längere Antworten, kann aber auch die Wahrscheinlichkeit erhöhen, dass unbrauchbare oder irrelevante Inhalte generiert werden.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Diese Option löscht alle vorhandenen Dateien in der Sammlung und ersetzt sie durch neu hochgeladene Dateien.",
 	"This response was generated by \"{{model}}\"": "Diese Antwort wurde von \"{{model}}\" generiert",

+ 3 - 3
src/lib/i18n/locales/dg-DG/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "",
 	"is typing...": "",
 	"January": "",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT Expire",
 	"JWT Token": "JWT Borken",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Keep Wow",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Keyboard Barkcuts",
@@ -1023,7 +1021,6 @@
 	"Rename": "",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Request Bark",
 	"Reranking Engine": "",
 	"Reranking Model": "",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/el-GR/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Μη έγκυρη μορφή αρχείου.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Μη έγκυρη Ετικέτα",
 	"is typing...": "",
 	"January": "Ιανουάριος",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Λήξη JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Διατήρηση Ζωντανής Σύνδεσης",
 	"Keep in Sidebar": "",
 	"Key": "Κλειδί",
 	"Keyboard shortcuts": "Συντομεύσεις Πληκτρολογίου",
@@ -1023,7 +1021,6 @@
 	"Rename": "Μετονομασία",
 	"Reorder Models": "Επαναταξινόμηση Μοντέλων",
 	"Reply in Thread": "",
-	"Request Mode": "Λειτουργία Αιτήματος",
 	"Reranking Engine": "",
 	"Reranking Model": "Μοντέλο Επαναταξινόμησης",
 	"Reset": "Επαναφορά",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Οι προγραμματιστές πίσω από αυτό το plugin είναι παθιασμένοι εθελοντές από την κοινότητα. Αν βρείτε αυτό το plugin χρήσιμο, παρακαλώ σκεφτείτε να συνεισφέρετε στην ανάπτυξή του.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Η κατάταξη αξιολόγησης βασίζεται στο σύστημα βαθμολόγησης Elo και ενημερώνεται σε πραγματικό χρόνο.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Το χαρακτηριστικό LDAP που αντιστοιχεί στο όνομα χρήστη που χρησιμοποιούν οι χρήστες για να συνδεθούν.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Αυτή η επιλογή θα διαγράψει όλα τα υπάρχοντα αρχεία στη συλλογή και θα τα αντικαταστήσει με νέα ανεβασμένα αρχεία.",
 	"This response was generated by \"{{model}}\"": "Αυτή η απάντηση δημιουργήθηκε από \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/en-GB/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "",
 	"is typing...": "",
 	"January": "",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "",
 	"JWT Token": "",
 	"Kagi Search API Key": "",
-	"Keep Alive": "",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "",
@@ -1023,7 +1021,6 @@
 	"Rename": "",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "",
 	"Reranking Engine": "",
 	"Reranking Model": "",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/en-US/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "",
 	"is typing...": "",
 	"January": "",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "",
 	"JWT Token": "",
 	"Kagi Search API Key": "",
-	"Keep Alive": "",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "",
@@ -1023,7 +1021,6 @@
 	"Rename": "",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "",
 	"Reranking Engine": "",
 	"Reranking Model": "",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

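The three strings added in the en-US hunks above are tooltip texts for Ollama's `format`, `keep_alive`, and `think` request options. For reference, a minimal TypeScript sketch of how those options are passed to Ollama's /api/generate endpoint; this is an illustration only, and the endpoint URL, model name, and schema below are placeholder assumptions, not part of this commit.

	// Sketch: sending the three options described by the new tooltip strings.
	// Assumes Node 18+ (global fetch) and a local Ollama instance at the default port.
	async function generateWithOptions(): Promise<unknown> {
		// format: "json" for free-form JSON, or a JSON schema object to constrain the output.
		const schema = {
			type: 'object',
			properties: { answer: { type: 'string' } },
			required: ['answer']
		};

		const res = await fetch('http://localhost:11434/api/generate', {
			method: 'POST',
			headers: { 'Content-Type': 'application/json' },
			body: JSON.stringify({
				model: 'llama3.2', // placeholder model name
				prompt: 'Reply with a short answer as JSON.',
				stream: false,
				format: schema, // "The format to return a response in. Format can be json or a JSON schema."
				keep_alive: '5m', // "...how long the model will stay loaded into memory following the request (default: 5m)"
				think: true // "...enables or disables the use of the reasoning feature in Ollama..."
			})
		});

		const data = await res.json();
		return JSON.parse(data.response); // the schema-constrained JSON answer
	}
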
+ 3 - 3
src/lib/i18n/locales/es-ES/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Formato de archivo Inválido.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Esquema JSON Inválido",
 	"Invalid Tag": "Etiqueta Inválida",
 	"is typing...": "está escribiendo...",
 	"January": "Enero",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Expiración del JSON Web Token (JWT)",
 	"JWT Token": "JSON Web Token",
 	"Kagi Search API Key": "Clave API de Kagi Search",
-	"Keep Alive": "Mantener Vivo",
 	"Keep in Sidebar": "",
 	"Key": "Clave",
 	"Keyboard shortcuts": "Atajos de teclado",
@@ -1023,7 +1021,6 @@
 	"Rename": "Renombrar",
 	"Reorder Models": "Reordenar Modelos",
 	"Reply in Thread": "Responder en Hilo",
-	"Request Mode": "Modo de Petición",
 	"Reranking Engine": "",
 	"Reranking Model": "Modelo de Reclasificación",
 	"Reset": "Reiniciar",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "El tamaño de lote determina cuántas solicitudes de texto se procesan juntas de una vez. Un tamaño de lote más alto puede aumentar el rendimiento y la velocidad del modelo, pero también requiere más memoria.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Quienes desarollaron este complemento son apasionados voluntarios/as de la comunidad. Si este complemento te es útil, por favor considera contribuir a su desarrollo.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "La tabla clasificatoria de evaluación se basa en el sistema de clasificación Elo y se actualiza en tiempo real.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "El atributo LDAP que mapea el correo que los usuarios utilizan para iniciar sesión.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "El atributo LDAP que mapea el nombre de usuario que los usuarios utilizan para iniciar sesión.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Esto garantiza que sus valiosas conversaciones se guardan de forma segura en tu base de datos del servidor trasero (backend). ¡Gracias!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta es una característica experimental, por lo que puede no funcionar como se esperaba y está sujeta a cambios en cualquier momento.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Esta opción controla cuántos tokens se conservan cuando se actualiza el contexto. Por ejemplo, si se establece en 2, se conservarán los primeros 2 tokens del contexto de la conversación. Conservar el contexto puede ayudar a mantener la continuidad de una conversación, pero puede reducir la habilidad para responder a nuevos temas.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Esta opción establece el número máximo de tokens que el modelo puede generar en sus respuestas. Aumentar este límite permite al modelo proporcionar respuestas más largas, pero también puede aumentar la probabilidad de que se genere contenido inútil o irrelevante.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Esta opción eliminará todos los archivos existentes en la colección y los reemplazará con los nuevos archivos subidos.",
 	"This response was generated by \"{{model}}\"": "Esta respuesta fue generada por \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/et-EE/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Vigane failiformaat.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Vigane silt",
 	"is typing...": "kirjutab...",
 	"January": "Jaanuar",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT aegumine",
 	"JWT Token": "JWT token",
 	"Kagi Search API Key": "Kagi Search API võti",
-	"Keep Alive": "Hoia elus",
 	"Keep in Sidebar": "",
 	"Key": "Võti",
 	"Keyboard shortcuts": "Klaviatuuri otseteed",
@@ -1023,7 +1021,6 @@
 	"Rename": "Nimeta ümber",
 	"Reorder Models": "Muuda mudelite järjekorda",
 	"Reply in Thread": "Vasta lõimes",
-	"Request Mode": "Päringu režiim",
 	"Reranking Engine": "",
 	"Reranking Model": "Ümberjärjestamise mudel",
 	"Reset": "Lähtesta",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Partii suurus määrab, mitu tekstipäringut töödeldakse korraga. Suurem partii suurus võib suurendada mudeli jõudlust ja kiirust, kuid see nõuab ka rohkem mälu.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Selle pistikprogrammi taga olevad arendajad on kogukonna pühendunud vabatahtlikud. Kui leiate, et see pistikprogramm on kasulik, palun kaaluge selle arendamise toetamist.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hindamise edetabel põhineb Elo hindamissüsteemil ja seda uuendatakse reaalajas.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP atribuut, mis kaardistab e-posti, mida kasutajad kasutavad sisselogimiseks.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "LDAP atribuut, mis kaardistab kasutajanime, mida kasutajad kasutavad sisselogimiseks.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "See tagab, et teie väärtuslikud vestlused salvestatakse turvaliselt teie tagarakenduse andmebaasi. Täname!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "See on katsetuslik funktsioon, see ei pruugi toimida ootuspäraselt ja võib igal ajal muutuda.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "See valik kontrollib, mitu tokenit säilitatakse konteksti värskendamisel. Näiteks kui see on määratud 2-le, säilitatakse vestluse konteksti viimased 2 tokenit. Konteksti säilitamine võib aidata säilitada vestluse järjepidevust, kuid võib vähendada võimet reageerida uutele teemadele.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "See valik määrab maksimaalse tokenite arvu, mida mudel saab oma vastuses genereerida. Selle piirmäära suurendamine võimaldab mudelil anda pikemaid vastuseid, kuid võib suurendada ka ebavajaliku või ebaolulise sisu genereerimise tõenäosust.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "See valik kustutab kõik olemasolevad failid kogust ja asendab need äsja üleslaaditud failidega.",
 	"This response was generated by \"{{model}}\"": "Selle vastuse genereeris \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/eu-ES/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Fitxategi formatu baliogabea.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Etiketa Baliogabea",
 	"is typing...": "",
 	"January": "Urtarrila",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT Iraungitzea",
 	"JWT Token": "JWT Tokena",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Mantendu Aktibo",
 	"Keep in Sidebar": "",
 	"Key": "Gakoa",
 	"Keyboard shortcuts": "Teklatuko lasterbideak",
@@ -1023,7 +1021,6 @@
 	"Rename": "Berrizendatu",
 	"Reorder Models": "Berrantolatu modeloak",
 	"Reply in Thread": "",
-	"Request Mode": "Eskaera modua",
 	"Reranking Engine": "",
 	"Reranking Model": "Berrantolatze modeloa",
 	"Reset": "Berrezarri",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Plugin honen atzean dauden garatzaileak komunitateko boluntario sutsuak dira. Plugin hau baliagarria iruditzen bazaizu, mesedez kontuan hartu bere garapenean laguntzea.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Ebaluazio sailkapena Elo sailkapen sisteman oinarritzen da eta denbora errealean eguneratzen da.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Erabiltzaileek saioa hasteko erabiltzen duten erabiltzaile-izenarekin mapeatzen den LDAP atributua.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Honek zure elkarrizketa baliotsuak modu seguruan zure backend datu-basean gordeko direla ziurtatzen du. Eskerrik asko!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Hau funtzionalitate esperimental bat da, baliteke espero bezala ez funtzionatzea eta edozein unetan aldaketak izatea.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Aukera honek bilduman dauden fitxategi guztiak ezabatuko ditu eta berriki kargatutako fitxategiekin ordezkatuko ditu.",
 	"This response was generated by \"{{model}}\"": "Erantzun hau \"{{model}}\" modeloak sortu du",

+ 3 - 3
src/lib/i18n/locales/fa-IR/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "قالب فایل نامعتبر است.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "طرح JSON نامعتبر",
 	"Invalid Tag": "تگ نامعتبر",
 	"is typing...": "در حال تایپ...",
 	"January": "ژانویه",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT انقضای",
 	"JWT Token": "JWT توکن",
 	"Kagi Search API Key": "کلید API جستجوی کاگی",
-	"Keep Alive": "Keep Alive",
 	"Keep in Sidebar": "",
 	"Key": "کلید",
 	"Keyboard shortcuts": "میانبرهای صفحه کلید",
@@ -1023,7 +1021,6 @@
 	"Rename": "تغییر نام",
 	"Reorder Models": "ترتیب مجدد مدل\u200cها",
 	"Reply in Thread": "پاسخ در رشته",
-	"Request Mode": "حالت درخواست",
 	"Reranking Engine": "",
 	"Reranking Model": "مدل ری\u200cشناسی مجدد غیرفعال است",
 	"Reset": "بازنشانی",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "اندازه دسته تعیین می\u200cکند که چند درخواست متنی همزمان پردازش می\u200cشوند. اندازه دسته بزرگتر می\u200cتواند عملکرد و سرعت مدل را افزایش دهد، اما به حافظه بیشتری نیاز دارد.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "توسعه\u200cدهندگان این افزونه داوطلبان مشتاق از جامعه هستند. اگر این افزونه را مفید می\u200cدانید، لطفاً در توسعه آن مشارکت کنید.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "تابلوی امتیازات ارزیابی بر اساس سیستم رتبه\u200cبندی Elo است و در زمان واقعی به\u200cروز می\u200cشود.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "ویژگی LDAP که به ایمیلی که کاربران برای ورود استفاده می\u200cکنند نگاشت می\u200cشود.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "ویژگی LDAP که به نام کاربری که کاربران برای ورود استفاده می\u200cکنند نگاشت می\u200cشود.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "این اطمینان می\u200cدهد که مکالمات ارزشمند شما به طور امن در پایگاه داده پشتیبان ذخیره می\u200cشوند. متشکریم!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "این یک ویژگی آزمایشی است، ممکن است طبق انتظار کار نکند و در هر زمان ممکن است تغییر کند.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "این گزینه کنترل می\u200cکند که هنگام تازه\u200cسازی متن، چند توکن حفظ شوند. برای مثال، اگر روی 2 تنظیم شود، 2 توکن آخر متن مکالمه حفظ خواهند شد. حفظ متن می\u200cتواند به حفظ پیوستگی مکالمه کمک کند، اما ممکن است توانایی پاسخ به موضوعات جدید را کاهش دهد.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "این گزینه حداکثر تعداد توکن\u200cهایی را که مدل می\u200cتواند در پاسخ خود تولید کند تنظیم می\u200cکند. افزایش این محدودیت به مدل اجازه می\u200cدهد پاسخ\u200cهای طولانی\u200cتری ارائه دهد، اما ممکن است احتمال تولید محتوای بی\u200cفایده یا نامربوط را نیز افزایش دهد.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "این گزینه تمام فایل\u200cهای موجود در مجموعه را حذف کرده و با فایل\u200cهای جدید آپلود شده جایگزین می\u200cکند.",
 	"This response was generated by \"{{model}}\"": "این پاسخ توسط \"{{model}}\" تولید شده است",

+ 3 - 3
src/lib/i18n/locales/fi-FI/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "Virheellinen tiedostosisältö",
 	"Invalid file format.": "Virheellinen tiedostomuoto.",
 	"Invalid JSON file": "Virheellinen JSON tiedosto",
-	"Invalid JSON schema": "Virheellinen JSON kaava",
 	"Invalid Tag": "Virheellinen tagi",
 	"is typing...": "Kirjoittaa...",
 	"January": "tammikuu",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT-vanheneminen",
 	"JWT Token": "JWT-token",
 	"Kagi Search API Key": "Kagi Search API -avain",
-	"Keep Alive": "Pysy aktiivisena",
 	"Keep in Sidebar": "",
 	"Key": "Avain",
 	"Keyboard shortcuts": "Pikanäppäimet",
@@ -1023,7 +1021,6 @@
 	"Rename": "Nimeä uudelleen",
 	"Reorder Models": "Uudelleenjärjestä malleja",
 	"Reply in Thread": "Vastauksia ",
-	"Request Mode": "Pyyntötila",
 	"Reranking Engine": "Uudelleenpisteytymismallin moottori",
 	"Reranking Model": "Uudelleenpisteytymismalli",
 	"Reset": "Palauta",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Eräkoko määrittää, kuinka monta tekstipyyntöä käsitellään kerralla. Suurempi eräkoko voi parantaa mallin suorituskykyä ja nopeutta, mutta se vaatii myös enemmän muistia.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Tämän lisäosan takana olevat kehittäjät ovat intohimoisia vapaaehtoisyhteisöstä. Jos koet tämän lisäosan hyödylliseksi, harkitse sen kehittämisen tukemista.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Arviointitulosluettelo perustuu Elo-luokitusjärjestelmään ja päivittyy reaaliajassa.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "Syöteäänen kieli. Syöttökielen antaminen ISO-639-1-muodossa (esim. en) parantaa tarkkuutta ja viivettä. Jätä tyhjäksi, jos haluat kielen automaattisen tunnistuksen.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-määrite, joka yhdistää käyttäjien kirjautumiseen käyttämään sähköpostiin.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "LDAP-määrite, joka vastaa käyttäjien kirjautumiskäyttäjänimeä.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tämä varmistaa, että arvokkaat keskustelusi tallennetaan turvallisesti backend-tietokantaasi. Kiitos!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tämä on kokeellinen ominaisuus, se ei välttämättä toimi odotetulla tavalla ja se voi muuttua milloin tahansa.",
 	"This model is not publicly available. Please select another model.": "Tämä malli ei ole julkisesti saatavilla. Valitse toinen malli.",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Tämä asetus määrittää, kuinka monta tokenia säilytetään kontekstia päivitettäessä. Jos arvoksi on asetettu esimerkiksi 2, keskustelukontekstin kaksi viimeistä tokenia säilytetään. Kontekstin säilyttäminen voi auttaa ylläpitämään keskustelun jatkuvuutta, mutta se voi heikentää kykyä vastata uusiin aiheisiin.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Tämä vaihtoehto asettaa mallin vastauksessaan luomien tokenien enimmäismäärän. Tämän rajan nostaminen antaa mallille mahdollisuuden tarjota pidempiä vastauksia, mutta se voi myös lisätä hyödyttömän tai epäolennaisen sisällön luomisen todennäköisyyttä.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Tämä vaihtoehto poistaa kaikki kokoelman nykyiset tiedostot ja korvaa ne uusilla ladatuilla tiedostoilla.",
 	"This response was generated by \"{{model}}\"": "Tämän vastauksen tuotti \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/fr-CA/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Étiquette non valide",
 	"is typing...": "",
 	"January": "Janvier",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Expiration du jeton JWT",
 	"JWT Token": "Jeton JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Rester connecté",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Raccourcis clavier",
@@ -1023,7 +1021,6 @@
 	"Rename": "Renommer",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Mode de Requête",
 	"Reranking Engine": "",
 	"Reranking Model": "Modèle de ré-ranking",
 	"Reset": "Réinitialiser",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/fr-FR/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Format de fichier non valide.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Schema JSon non valide",
 	"Invalid Tag": "Tag non valide",
 	"is typing...": "est en train d'écrire...",
 	"January": "Janvier",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Expiration du token JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "Clé API Kagi Search",
-	"Keep Alive": "Temps de maintien connecté",
 	"Keep in Sidebar": "",
 	"Key": "Clé",
 	"Keyboard shortcuts": "Raccourcis clavier",
@@ -1023,7 +1021,6 @@
 	"Rename": "Renommer",
 	"Reorder Models": "Réorganiser les modèles",
 	"Reply in Thread": "Répondre dans le fil de discussion",
-	"Request Mode": "Mode de requête",
 	"Reranking Engine": "",
 	"Reranking Model": "Modèle de ré-ranking",
 	"Reset": "Réinitialiser",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "La taille du lot détermine combien de requêtes texte sont traitées simultanément. Une taille plus grande améliore les performances mais consomme plus de mémoire.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Les développeurs de ce plugin sont des bénévoles passionnés issus de la communauté. Si vous trouvez ce plugin utile, merci de contribuer à son développement.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Le classement d'évaluation est basé sur le système de notation Elo et est mis à jour en temps réel.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "L'attribut LDAP qui correspond à l'adresse e-mail que les utilisateurs utilisent pour se connecter.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "L'attribut LDAP qui correspond au nom d'utilisateur que les utilisateurs utilisent pour se connecter.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Cette option détermine combien de Token sont conservés lors du rafraîchissement du contexte. Par exemple, avec une valeur de 2, les 2 derniers Token seront conservés. Cela aide à maintenir la continuité de la conversation, mais peut limiter la capacité à traiter de nouveaux sujets.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Cette option définit le nombre maximal de Token que le modèle peut générer dans sa réponse. Une valeur plus élevée permet des réponses plus longues, mais peut aussi générer du contenu moins pertinent.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Cette option supprimera tous les fichiers existants dans la collection et les remplacera par les fichiers nouvellement téléchargés.",
 	"This response was generated by \"{{model}}\"": "Cette réponse a été générée par \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/he-IL/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "תג לא חוקי",
 	"is typing...": "מקליד...",
 	"January": "ינואר",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "תפוגת JWT",
 	"JWT Token": "אסימון JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "השאר פעיל",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "קיצורי מקלדת",
@@ -1023,7 +1021,6 @@
 	"Rename": "שנה שם",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "מצב בקשה",
 	"Reranking Engine": "",
 	"Reranking Model": "מודל דירוג מחדש",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "פעולה זו מבטיחה שהשיחות בעלות הערך שלך יישמרו באופן מאובטח במסד הנתונים העורפי שלך. תודה!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/hi-IN/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "अवैध टैग",
 	"is typing...": "",
 	"January": "जनवरी",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT समाप्ति",
 	"JWT Token": "जट टोकन",
 	"Kagi Search API Key": "",
-	"Keep Alive": "क्रियाशील रहो",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "कीबोर्ड शॉर्टकट",
@@ -1023,7 +1021,6 @@
 	"Rename": "नाम बदलें",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "अनुरोध मोड",
 	"Reranking Engine": "",
 	"Reranking Model": "रीरैकिंग मोड",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "यह सुनिश्चित करता है कि आपकी मूल्यवान बातचीत आपके बैकएंड डेटाबेस में सुरक्षित रूप से सहेजी गई है। धन्यवाद!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/hr-HR/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Nevažeća oznaka",
 	"is typing...": "",
 	"January": "Siječanj",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Isticanje JWT-a",
 	"JWT Token": "JWT token",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Održavanje živim",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Tipkovnički prečaci",
@@ -1023,7 +1021,6 @@
 	"Rename": "Preimenuj",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Način zahtjeva",
 	"Reranking Engine": "",
 	"Reranking Model": "Model za ponovno rangiranje",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ovo osigurava da su vaši vrijedni razgovori sigurno spremljeni u bazu podataka. Hvala vam!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ovo je eksperimentalna značajka, možda neće funkcionirati prema očekivanjima i podložna je promjenama u bilo kojem trenutku.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/hu-HU/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Érvénytelen fájlformátum.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Érvénytelen JSON séma",
 	"Invalid Tag": "Érvénytelen címke",
 	"is typing...": "ír...",
 	"January": "Január",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT lejárat",
 	"JWT Token": "JWT token",
 	"Kagi Search API Key": "Kagi Search API kulcs",
-	"Keep Alive": "Kapcsolat fenntartása",
 	"Keep in Sidebar": "",
 	"Key": "Kulcs",
 	"Keyboard shortcuts": "Billentyűparancsok",
@@ -1023,7 +1021,6 @@
 	"Rename": "Átnevezés",
 	"Reorder Models": "Modellek átrendezése",
 	"Reply in Thread": "Válasz szálban",
-	"Request Mode": "Kérési mód",
 	"Reranking Engine": "",
 	"Reranking Model": "Újrarangsoroló modell",
 	"Reset": "Visszaállítás",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "A köteg méret meghatározza, hány szöveges kérést dolgoz fel egyszerre. Magasabb köteg méret növelheti a modell teljesítményét és sebességét, de több memóriát is igényel.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "A bővítmény fejlesztői lelkes önkéntesek a közösségből. Ha hasznosnak találja ezt a bővítményt, kérjük, fontolja meg a fejlesztéséhez való hozzájárulást.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Az értékelési ranglista az Elo értékelési rendszeren alapul és valós időben frissül.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Az LDAP attribútum, amely a felhasználók bejelentkezéshez használt emailjéhez kapcsolódik.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Az LDAP attribútum, amely a felhasználók bejelentkezéshez használt felhasználónevéhez kapcsolódik.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ez biztosítja, hogy értékes beszélgetései biztonságosan mentésre kerüljenek a backend adatbázisban. Köszönjük!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ez egy kísérleti funkció, lehet, hogy nem a várt módon működik és bármikor változhat.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Ez az opció szabályozza, hány token marad meg a kontextus frissítésekor. Például, ha 2-re van állítva, a beszélgetés kontextusának utolsó 2 tokenje megmarad. A kontextus megőrzése segíthet a beszélgetés folytonosságának fenntartásában, de csökkentheti az új témákra való reagálás képességét.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Ez az opció beállítja a modell által generálható tokenek maximális számát a válaszban. Ezen limit növelése hosszabb válaszokat tesz lehetővé, de növelheti a nem hasznos vagy irreleváns tartalom generálásának valószínűségét.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Ez az opció törli az összes meglévő fájlt a gyűjteményben és lecseréli őket az újonnan feltöltött fájlokkal.",
 	"This response was generated by \"{{model}}\"": "Ezt a választ a \"{{model}}\" generálta",

+ 3 - 3
src/lib/i18n/locales/id-ID/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Tag tidak valid",
 	"is typing...": "",
 	"January": "Januari",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Kedaluwarsa JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Tetap Hidup",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Pintasan keyboard",
@@ -1023,7 +1021,6 @@
 	"Rename": "Ganti nama",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Mode Permintaan",
 	"Reranking Engine": "",
 	"Reranking Model": "Model Pemeringkatan Ulang",
 	"Reset": "Atur Ulang",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahwa percakapan Anda yang berharga disimpan dengan aman ke basis data backend. Terima kasih!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ini adalah fitur eksperimental, mungkin tidak berfungsi seperti yang diharapkan dan dapat berubah sewaktu-waktu.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/ie-GA/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "Ábhar comhaid neamhbhailí",
 	"Invalid file format.": "Formáid comhaid neamhbhailí.",
 	"Invalid JSON file": "Comhad JSON neamhbhailí",
-	"Invalid JSON schema": "Scéimre JSON neamhbhailí",
 	"Invalid Tag": "Clib neamhbhailí",
 	"is typing...": "ag clóscríobh...",
 	"January": "Eanáir",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Éag JWT",
 	"JWT Token": "Comhartha JWT",
 	"Kagi Search API Key": "Eochair API Chuardaigh Kagi",
-	"Keep Alive": "Coinnigh Beo",
 	"Keep in Sidebar": "",
 	"Key": "Eochair",
 	"Keyboard shortcuts": "Aicearraí méarchlár",
@@ -1023,7 +1021,6 @@
 	"Rename": "Athainmnigh",
 	"Reorder Models": "Múnlaí Athordú",
 	"Reply in Thread": "Freagra i Snáithe",
-	"Request Mode": "Mód Iarratais",
 	"Reranking Engine": "Inneall Athrangúcháin",
 	"Reranking Model": "Múnla Athrangú",
 	"Reset": "Athshocraigh",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Cinneann méid an bhaisc cé mhéad iarratas téacs a phróiseáiltear le chéile ag an am céanna. Is féidir le méid baisc níos airde feidhmíocht agus luas an mhúnla a mhéadú, ach éilíonn sé níos mó cuimhne freisin.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Is deonacha paiseanta ón bpobal iad na forbróirí taobh thiar den bhreiseán seo. Má aimsíonn an breiseán seo cabhrach leat, smaoinigh ar rannchuidiú lena fhorbairt.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Tá an clár ceannairí meastóireachta bunaithe ar chóras rátála Elo agus déantar é a nuashonrú i bhfíor-am.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "Teanga an fhuaime ionchuir. Má sholáthraítear an teanga ionchuir i bhformáid ISO-639-1 (e.g. en), feabhsófar cruinneas agus moill. Fág bán é chun an teanga a bhrath go huathoibríoch.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "An tréith LDAP a mhapálann don ríomhphost a úsáideann úsáideoirí chun síniú isteach.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "An tréith LDAP a mhapálann don ainm úsáideora a úsáideann úsáideoirí chun síniú isteach.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cinntíonn sé seo go sábhálfar do chomhráite luachmhara go daingean i do bhunachar sonraí cúltaca Go raibh maith agat!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Is gné turgnamhach í seo, b'fhéidir nach bhfeidhmeoidh sé mar a bhíothas ag súil leis agus tá sé faoi réir athraithe ag am ar bith.",
 	"This model is not publicly available. Please select another model.": "Níl an tsamhail seo ar fáil go poiblí. Roghnaigh samhail eile le do thoil.",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Rialaíonn an rogha seo cé mhéad comhartha a chaomhnaítear agus an comhthéacs á athnuachan. Mar shampla, má shocraítear go 2 é, coinneofar an 2 chomhartha dheireanacha de chomhthéacs an chomhrá. Is féidir le comhthéacs a chaomhnú cabhrú le leanúnachas comhrá a choinneáil, ach d’fhéadfadh sé laghdú a dhéanamh ar an gcumas freagairt do thopaicí nua.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Socraíonn an rogha seo an t-uaslíon comharthaí is féidir leis an tsamhail a ghiniúint ina fhreagra. Tríd an teorainn seo a mhéadú is féidir leis an tsamhail freagraí níos faide a sholáthar, ach d’fhéadfadh go méadódh sé an dóchúlacht go nginfear ábhar neamhchabhrach nó nach mbaineann le hábhar.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Scriosfaidh an rogha seo gach comhad atá sa bhailiúchán agus cuirfear comhaid nua-uaslódála ina n-ionad.",
 	"This response was generated by \"{{model}}\"": "Gin an freagra seo ag \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/it-IT/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "Contenuto del file non valido",
 	"Invalid file format.": "Formato file non valido.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Schema JSON non valido",
 	"Invalid Tag": "Tag non valido",
 	"is typing...": "sta digitando...",
 	"January": "Gennaio",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Scadenza JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "Chiave API di ricerca Kagi",
-	"Keep Alive": "Mantieni attivo",
 	"Keep in Sidebar": "",
 	"Key": "Chiave",
 	"Keyboard shortcuts": "Scorciatoie da tastiera",
@@ -1023,7 +1021,6 @@
 	"Rename": "Rinomina",
 	"Reorder Models": "Riordina Modelli",
 	"Reply in Thread": "Rispondi nel thread",
-	"Request Mode": "Modalità Richiesta",
 	"Reranking Engine": "Engine di Riclassificazione",
 	"Reranking Model": "Modello di Riclassificazione",
 	"Reset": "Ripristina",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "La dimensione del batch determina quanti richieste di testo vengono elaborate insieme in una sola volta. Una dimensione del batch più alta può aumentare le prestazioni e la velocità del modello, ma richiede anche più memoria.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Gli sviluppatori dietro questo plugin sono volontari appassionati della comunità. Se trovi utile questo plugin, ti preghiamo di considerare di contribuire al suo sviluppo.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "La classifica di valutazione è basata sul sistema di rating Elo ed è aggiornata in tempo reale.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "L'attributo LDAP che mappa alla mail che gli utenti usano per accedere.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "L'attributo LDAP che mappa al nome utente che gli utenti usano per accedere.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ciò garantisce che le tue preziose conversazioni siano salvate in modo sicuro nel tuo database backend. Grazie!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Questa è una funzionalità sperimentale, potrebbe non funzionare come previsto ed è soggetta a modifiche in qualsiasi momento.",
 	"This model is not publicly available. Please select another model.": "Questo modello non è disponibile pubblicamente. Seleziona un altro modello.",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Questa opzione controlla quanti token vengono preservati quando si aggiorna il contesto. Ad esempio, se impostato su 2, gli ultimi 2 token del contesto della conversazione verranno mantenuti. Preservare il contesto può aiutare a mantenere la continuità di una conversazione, ma potrebbe ridurre la capacità di rispondere a nuovi argomenti.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Questa opzione imposta il numero massimo di token che il modello può generare nella sua risposta. Aumentare questo limite consente al modello di fornire risposte più lunghe, ma potrebbe anche aumentare la probabilità che vengano generati contenuti non utili o irrilevanti.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Questa opzione eliminerà tutti i file esistenti nella collezione e li sostituirà con i file appena caricati.",
 	"This response was generated by \"{{model}}\"": "Questa risposta è stata generata da \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/ja-JP/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "無効なファイル内容",
 	"Invalid file format.": "無効なファイル形式",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "無効なJSONスキーマ",
 	"Invalid Tag": "無効なタグ",
 	"is typing...": "入力中...",
 	"January": "1月",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT 有効期限",
 	"JWT Token": "JWT トークン",
 	"Kagi Search API Key": "Kagi Search APIキー",
-	"Keep Alive": "キープアライブ",
 	"Keep in Sidebar": "",
 	"Key": "キー",
 	"Keyboard shortcuts": "キーボードショートカット",
@@ -1023,7 +1021,6 @@
 	"Rename": "名前を変更",
 	"Reorder Models": "モデルを並べ替え",
 	"Reply in Thread": "スレッドで返信",
-	"Request Mode": "リクエストモード",
 	"Reranking Engine": "リランクエンジン",
 	"Reranking Model": "リランクモデル",
 	"Reset": "リセット",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "バッチサイズは一度に処理されるテキストリクエストの数を決定します。バッチサイズを高くすると、モデルのパフォーマンスと速度が向上しますが、メモリの使用量も増加します。",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "このプラグインはコミュニティの熱意のあるボランティアによって開発されています。このプラグインがお役に立った場合は、開発に貢献することを検討してください。",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "評価リーダーボードはElo評価システムに基づいており、実時間で更新されています。",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "ユーザーがサインインに使用するメールのLDAP属性。",
 	"The LDAP attribute that maps to the username that users use to sign in.": "ユーザーがサインインに使用するユーザー名のLDAP属性。",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "これは、貴重な会話がバックエンドデータベースに安全に保存されることを保証します。ありがとうございます!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "実験的機能であり正常動作しない場合があります。",
 	"This model is not publicly available. Please select another model.": "このモデルは公開されていません。別のモデルを選択してください。",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "このオプションは、コンテキストをリフレッシュする際に保持するトークンの数を制御します。例えば、2に設定すると、会話のコンテキストの最後の2つのトークンが保持されます。コンテキストを保持することで、会話の継続性を維持できますが、新しいトピックに応答する能力を低下させる可能性があります。",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "このオプションは、モデルが生成できるトークンの最大数を設定します。この制限を増加すると、モデルはより長い回答を生成できるようになりますが、不適切な内容や関連性の低い内容が生成される可能性も高まります。",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "このレスポンスは\"{{model}}\"によって生成されました。",

+ 3 - 3
src/lib/i18n/locales/ka-GE/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "არასწორი ფაილის ფორმატი.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "არასწორი ჭდე",
 	"is typing...": "",
 	"January": "იანვარი",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT-ის ვადა",
 	"JWT Token": "JWT ტოკენი",
 	"Kagi Search API Key": "",
-	"Keep Alive": "აქტიურად დატოვება",
 	"Keep in Sidebar": "",
 	"Key": "გასაღები",
 	"Keyboard shortcuts": "კლავიატურის მალსახმობები",
@@ -1023,7 +1021,6 @@
 	"Rename": "სახელის გადარქმევა",
 	"Reorder Models": "",
 	"Reply in Thread": "ნაკადში პასუხი",
-	"Request Mode": "მოთხოვნის რეჟიმი",
 	"Reranking Engine": "",
 	"Reranking Model": "Reranking მოდელი",
 	"Reset": "ჩამოყრა",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ეს უზრუნველყოფს, რომ თქვენი ღირებული საუბრები უსაფრთხოდ შეინახება თქვენს უკანაბოლო მონაცემთა ბაზაში. მადლობა!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/ko-KR/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "잘못된 파일 형식",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "잘못된 JSON 스키마",
 	"Invalid Tag": "잘못된 태그",
 	"is typing...": "입력 중...",
 	"January": "1월",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT 만료",
 	"JWT Token": "JWT 토큰",
 	"Kagi Search API Key": "Kagi Search API 키",
-	"Keep Alive": "계속 유지하기",
 	"Keep in Sidebar": "",
 	"Key": "키",
 	"Keyboard shortcuts": "키보드 단축키",
@@ -1023,7 +1021,6 @@
 	"Rename": "이름 변경",
 	"Reorder Models": "모델 재정렬",
 	"Reply in Thread": "스레드로 답장하기",
-	"Request Mode": "요청 모드",
 	"Reranking Engine": "",
 	"Reranking Model": "Reranking 모델",
 	"Reset": "초기화",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "배치 크기에 따라 한 번에 처리되는 텍스트 요청의 수가 결정됩니다. 배치 크기가 크면 모델의 성능과 속도가 향상될 수 있지만 더 많은 메모리가 필요하기도 합니다.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "이 플러그인은 커뮤니티의 열정적인 자원봉사자들이 개발했습니다. 유용하게 사용하셨다면 개발에 기여해 주시는 것도 고려해 주세요.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "평가 리더보드는 Elo 평가 시스템을 기반으로 하고 실시간으로 업데이트됩니다",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "사용자가 로그인하는 데 사용하는 메일에 매핑되는 LDAP 속성입니다.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "사용자가 로그인할 때 사용하는 사용자 이름에 매핑되는 LDAP 속성입니다.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "이렇게 하면 소중한 대화 내용이 백엔드 데이터베이스에 안전하게 저장됩니다. 감사합니다!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "이것은 실험적 기능으로, 예상대로 작동하지 않을 수 있으며 언제든지 변경될 수 있습니다.",
 	"This model is not publicly available. Please select another model.": "이 모델은 공개적으로 사용할 수 없습니다. 다른 모델을 선택해주세요.",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "이 옵션은 컨텍스트를 새로 고칠 때 보존되는 토큰의 수를 제어합니다. 예를 들어 2로 설정하면 대화 컨텍스트의 마지막 2개 토큰이 유지됩니다. 컨텍스트를 보존하면 대화의 연속성을 유지하는 데 도움이 될 수 있지만 새로운 주제에 대한 응답 능력이 감소할 수 있습니다.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "이 옵션은 모델이 응답에서 생성할 수 있는 최대 토큰 수를 설정합니다. 이 한도를 늘리면 모델이 더 긴 답변을 제공할 수 있지만, 도움이 되지 않거나 관련 없는 콘텐츠가 생성될 가능성도 높아질 수 있습니다.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "이 옵션을 선택하면 기존 컬렉션의 모든 파일이 삭제되고, 새로 업로드된 파일로 대체됩니다.",
 	"This response was generated by \"{{model}}\"": "\"{{model}}\"이 생성한 응답입니다",

+ 3 - 3
src/lib/i18n/locales/lt-LT/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Neteisinga žyma",
 	"is typing...": "",
 	"January": "Sausis",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT išėjimas iš galiojimo",
 	"JWT Token": "JWT žetonas",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Išlaikyti aktyviu",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Klaviatūros trumpiniai",
@@ -1023,7 +1021,6 @@
 	"Rename": "Pervadinti",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Užklausos rėžimas",
 	"Reranking Engine": "",
 	"Reranking Model": "Reranking modelis",
 	"Reset": "Atkurti",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Šis modulis kuriamas savanorių. Palaikykite jų darbus finansiškai arba prisidėdami kodu.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tai užtikrina, kad Jūsų pokalbiai saugiai saugojami duomenų bazėje. Ačiū!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tai eksperimentinė funkcija ir gali veikti nevisada.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/ms-MY/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Tag tidak sah",
 	"is typing...": "",
 	"January": "Januari",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Tempoh Tamat JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Kekalkan Hidup",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Pintasan papan kekunci",
@@ -1023,7 +1021,6 @@
 	"Rename": "Namakan Semula",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Mod Permintaan",
 	"Reranking Engine": "",
 	"Reranking Model": "Model 'Reranking'",
 	"Reset": "Tetapkan Semula",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Pembangun di sebalik 'plugin' ini adalah sukarelawan yang bersemangat daripada komuniti. Jika anda mendapati 'plugin' ini membantu, sila pertimbangkan untuk menyumbang kepada pembangunannya.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahawa perbualan berharga anda disimpan dengan selamat ke pangkalan data 'backend' anda. Terima kasih!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "ni adalah ciri percubaan, ia mungkin tidak berfungsi seperti yang diharapkan dan tertakluk kepada perubahan pada bila-bila masa.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/nb-NO/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Ugyldig filformat.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Ugyldig etikett",
 	"is typing...": "Skriver...",
 	"January": "januar",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT-utløp",
 	"JWT Token": "JWT-token",
 	"Kagi Search API Key": "API-nøkkel for Kagi Search",
-	"Keep Alive": "Hold i live",
 	"Keep in Sidebar": "",
 	"Key": "Nøkkel",
 	"Keyboard shortcuts": "Hurtigtaster",
@@ -1023,7 +1021,6 @@
 	"Rename": "Gi nytt navn",
 	"Reorder Models": "Sorter modeller på nytt",
 	"Reply in Thread": "Svar i tråd",
-	"Request Mode": "Forespørselsmodus",
 	"Reranking Engine": "",
 	"Reranking Model": "Omrangeringsmodell",
 	"Reset": "Tilbakestill",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Utviklerne bak denne utvidelsen er lidenskapelige frivillige fra fellesskapet. Hvis du finner denne utvidelsen nyttig, vennligst vurder å bidra til utviklingen.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Ledertavlens over evalueringer er basert på Elo-rangeringssystemet, og oppdateres i sanntid.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-attributtet som tilsvarer e-posten som brukerne bruker for å logge på.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "LDAP-attributtet som tilsvarer brukernavnet som brukerne bruker for å logge på.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer at de verdifulle samtalene dine lagres sikkert i backend-databasen din. Takk!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentell funksjon. Det er mulig den ikke fungerer som forventet, og den kan endres når som helst.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Dette alternativet sletter alle eksisterende filer i samlingen og erstatter dem med nyopplastede filer.",
 	"This response was generated by \"{{model}}\"": "Dette svaret er generert av \"{{modell}}\"",

+ 3 - 3
src/lib/i18n/locales/nl-NL/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Ongeldig bestandsformaat",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Ongeldig JSON-schema",
 	"Invalid Tag": "Ongeldige Tag",
 	"is typing...": "is aan het schrijven...",
 	"January": "Januari",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT Expiration",
 	"JWT Token": "JWT Token",
 	"Kagi Search API Key": "Kagi Search API-sleutel",
-	"Keep Alive": "Houd Actief",
 	"Keep in Sidebar": "",
 	"Key": "Sleutel",
 	"Keyboard shortcuts": "Toetsenbord snelkoppelingen",
@@ -1023,7 +1021,6 @@
 	"Rename": "Hernoemen",
 	"Reorder Models": "Herschik modellen",
 	"Reply in Thread": "Antwoord in draad",
-	"Request Mode": "Request Modus",
 	"Reranking Engine": "",
 	"Reranking Model": "Reranking Model",
 	"Reset": "Herstellen",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "De batchgrootte bepaalt hoeveel tekstverzoeken tegelijk worden verwerkt. Een hogere batchgrootte kan de prestaties en snelheid van het model verhogen, maar vereist ook meer geheugen.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "De ontwikkelaars achter deze plugin zijn gepassioneerde vrijwilligers uit de gemeenschap. Als je deze plugin nuttig vindt, overweeg dan om bij te dragen aan de ontwikkeling ervan.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Het beoordelingsklassement is gebaseerd op het Elo-classificatiesysteem en wordt in realtime bijgewerkt.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Het LDAP-attribuut dat verwijst naar de e-mail waarmee gebruikers zich aanmelden.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Het LDAP-attribuut dat verwijst naar de gebruikersnaam die gebruikers gebruiken om in te loggen.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dit zorgt ervoor dat je waardevolle gesprekken veilig worden opgeslagen in je backend database. Dank je wel!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dit is een experimentele functie, het kan functioneren zoals verwacht en kan op elk moment veranderen.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Deze optie bepaalt hoeveel tokens bewaard blijven bij het verversen van de context. Als deze bijvoorbeeld op 2 staat, worden de laatste 2 tekens van de context van het gesprek bewaard. Het behouden van de context kan helpen om de continuïteit van een gesprek te behouden, maar het kan de mogelijkheid om te reageren op nieuwe onderwerpen verminderen.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Deze optie stelt het maximum aantal tokens in dat het model kan genereren in zijn antwoord. Door dit limiet te verhogen, kan het model langere antwoorden geven, maar het kan ook de kans vergroten dat er onbehulpzame of irrelevante inhoud wordt gegenereerd.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Deze optie verwijdert alle bestaande bestanden in de collectie en vervangt ze door nieuw geüploade bestanden.",
 	"This response was generated by \"{{model}}\"": "Dit antwoord is gegenereerd door  \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/pa-IN/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "ਗਲਤ ਟੈਗ",
 	"is typing...": "",
 	"January": "ਜਨਵਰੀ",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT ਮਿਆਦ ਖਤਮ",
 	"JWT Token": "JWT ਟੋਕਨ",
 	"Kagi Search API Key": "",
-	"Keep Alive": "ਜੀਵਿਤ ਰੱਖੋ",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "ਕੀਬੋਰਡ ਸ਼ਾਰਟਕਟ",
@@ -1023,7 +1021,6 @@
 	"Rename": "ਨਾਮ ਬਦਲੋ",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "ਬੇਨਤੀ ਮੋਡ",
 	"Reranking Engine": "",
 	"Reranking Model": "ਮਾਡਲ ਮੁੜ ਰੈਂਕਿੰਗ",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ਇਹ ਯਕੀਨੀ ਬਣਾਉਂਦਾ ਹੈ ਕਿ ਤੁਹਾਡੀਆਂ ਕੀਮਤੀ ਗੱਲਾਂ ਤੁਹਾਡੇ ਬੈਕਐਂਡ ਡਾਟਾਬੇਸ ਵਿੱਚ ਸੁਰੱਖਿਅਤ ਤੌਰ 'ਤੇ ਸੰਭਾਲੀਆਂ ਗਈਆਂ ਹਨ। ਧੰਨਵਾਦ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/pl-PL/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Nieprawidłowy format pliku.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Nieprawidłowy schemat JSON",
 	"Invalid Tag": "Nieprawidłowy tag",
 	"is typing...": "Pisanie...",
 	"January": "Styczeń",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Termin ważności JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "Klucz API Kagi Search",
-	"Keep Alive": "Utrzymuj łączność",
 	"Keep in Sidebar": "",
 	"Key": "Klucz",
 	"Keyboard shortcuts": "Skróty klawiszowe",
@@ -1023,7 +1021,6 @@
 	"Rename": "Zmień nazwę",
 	"Reorder Models": "Przeorganizuj modele",
 	"Reply in Thread": "Odpowiedz w wątku",
-	"Request Mode": "Tryb żądania",
 	"Reranking Engine": "",
 	"Reranking Model": "Poprawa rankingu modelu",
 	"Reset": "Resetuj",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Twórcy tego wtyczki to entuzjaści, którzy działają jako wolontariusze ze społeczności. Jeśli uważasz, że ta wtyczka jest pomocna, rozważ wsparcie jej rozwoju.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Tablica wyników oceny opiera się na systemie rankingu Elo i jest aktualizowana w czasie rzeczywistym.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Atrybut LDAP, który mapuje się na adres e-mail używany przez użytkowników do logowania.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Atrybut LDAP, który mapuje się na nazwę użytkownika, którą użytkownicy używają do logowania.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To gwarantuje, że Twoje wartościowe rozmowy są bezpiecznie zapisywane w bazie danych backendowej. Dziękujemy!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "To jest funkcja eksperymentalna, może nie działać zgodnie z oczekiwaniami i jest podatna na zmiany w dowolnym momencie.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Ta opcja usunie wszystkie istniejące pliki w kolekcji i zastąpi je nowo przesłanymi plikami.",
 	"This response was generated by \"{{model}}\"": "Ta odpowiedź została wygenerowana przez \"{{model}}\".",

+ 3 - 3
src/lib/i18n/locales/pt-BR/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Formato de arquivo inválido.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Tag Inválida",
 	"is typing...": "",
 	"January": "Janeiro",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Expiração do JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Manter Vivo",
 	"Keep in Sidebar": "",
 	"Key": "Chave",
 	"Keyboard shortcuts": "Atalhos de Teclado",
@@ -1023,7 +1021,6 @@
 	"Rename": "Renomear",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Modo de Solicitação",
 	"Reranking Engine": "",
 	"Reranking Model": "Modelo de Reclassificação",
 	"Reset": "Redefinir",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Os desenvolvedores por trás deste plugin são voluntários apaixonados da comunidade. Se você achar este plugin útil, considere contribuir para o seu desenvolvimento.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "A evolução do ranking de avaliação é baseada no sistema Elo e será atualizada em tempo real.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "O atributo LDAP que mapeia para o nome de usuário que os usuários usam para fazer login.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isso garante que suas conversas valiosas sejam salvas com segurança no banco de dados do backend. Obrigado!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta é uma funcionalidade experimental, pode não funcionar como esperado e está sujeita a alterações a qualquer momento.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Essa opção deletará todos os arquivos existentes na coleção e todos eles serão substituídos.",
 	"This response was generated by \"{{model}}\"": "Esta resposta foi gerada por \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/pt-PT/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Etiqueta Inválida",
 	"is typing...": "",
 	"January": "Janeiro",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Expiração JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Manter Vivo",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Atalhos de teclado",
@@ -1023,7 +1021,6 @@
 	"Rename": "Renomear",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Modo de Pedido",
 	"Reranking Engine": "",
 	"Reranking Model": "Modelo de Reranking",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isto garante que suas conversas valiosas sejam guardadas com segurança na sua base de dados de backend. Obrigado!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Isto é um recurso experimental, pode não funcionar conforme o esperado e está sujeito a alterações a qualquer momento.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/ro-RO/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Format de fișier invalid.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Etichetă Invalidă",
 	"is typing...": "",
 	"January": "Ianuarie",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Expirarea JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Menține Activ",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Scurtături de la Tastatură",
@@ -1023,7 +1021,6 @@
 	"Rename": "Redenumește",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Mod de Cerere",
 	"Reranking Engine": "",
 	"Reranking Model": "Model de Rearanjare",
 	"Reset": "Resetează",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Dezvoltatorii din spatele acestui plugin sunt voluntari pasionați din comunitate. Dacă considerați acest plugin util, vă rugăm să luați în considerare contribuția la dezvoltarea sa.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Clasamentul de evaluare se bazează pe sistemul de rating Elo și este actualizat în timp real.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Acest lucru asigură că conversațiile dvs. valoroase sunt salvate în siguranță în baza de date a backend-ului dvs. Mulțumim!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aceasta este o funcție experimentală, poate să nu funcționeze așa cum vă așteptați și este supusă schimbării în orice moment.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Această opțiune va șterge toate fișierelor existente din colecție și le va înlocui cu fișierele nou încărcate.",
 	"This response was generated by \"{{model}}\"": "Acest răspuns a fost generat de \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/ru-RU/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "Недопустимое содержимое файла",
 	"Invalid file format.": "Неверный формат файла.",
 	"Invalid JSON file": "Недопустимый файл JSON",
-	"Invalid JSON schema": "Недопустимая схема JSON",
 	"Invalid Tag": "Недопустимый тег",
 	"is typing...": "печатает...",
 	"January": "Январь",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Истечение срока JWT",
 	"JWT Token": "Токен JWT",
 	"Kagi Search API Key": "API ключ поиска Kagi",
-	"Keep Alive": "Поддерживать активность",
 	"Keep in Sidebar": "Оставить на боковой панели",
 	"Key": "Ключ",
 	"Keyboard shortcuts": "Горячие клавиши",
@@ -1023,7 +1021,6 @@
 	"Rename": "Переименовать",
 	"Reorder Models": "Изменение порядка моделей",
 	"Reply in Thread": "Ответить в обсуждении",
-	"Request Mode": "Режим запроса",
 	"Reranking Engine": "Движок реранжирования",
 	"Reranking Model": "Модель реранжирования",
 	"Reset": "Сбросить",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Размер пакета определяет, сколько текстовых запросов обрабатывается одновременно. Больший размер пакета может повысить производительность и быстродействие модели, но также требует больше памяти.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Разработчики этого плагина - увлеченные волонтеры из сообщества. Если вы считаете этот плагин полезным, пожалуйста, подумайте о том, чтобы внести свой вклад в его разработку.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Таблица лидеров оценки основана на рейтинговой системе Elo и обновляется в режиме реального времени.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "Язык входного аудиосигнала.  Укажите язык ввода в формате ISO-639-1 (например, en), что повысит точность и время ожидания. Оставьте поле пустым для автоматического определения языка.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Атрибут LDAP, который сопоставляется с почтой, используемой пользователями для входа в систему.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Атрибут LDAP, который сопоставляется с именем пользователя, используемым пользователями для входа в систему.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Это обеспечивает сохранение ваших ценных разговоров в безопасной базе данных на вашем сервере. Спасибо!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Это экспериментальная функция, она может работать не так, как ожидалось, и может быть изменена в любое время.",
 	"This model is not publicly available. Please select another model.": "Эта модель недоступна в открытом доступе. Пожалуйста, выберите другую модель.",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Этот параметр определяет, сколько токенов сохраняется при обновлении контекста. Например, если задано значение 2, будут сохранены последние 2 токена контекста беседы. Сохранение контекста может помочь сохранить непрерывность беседы, но может уменьшить возможность отвечать на новые темы.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Этот параметр устанавливает максимальное количество токенов, которые модель может генерировать в своем ответе. Увеличение этого ограничения позволяет модели предоставлять более длинные ответы, но также может увеличить вероятность создания бесполезного или нерелевантного контента.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Эта опция удалит все существующие файлы в коллекции и заменит их вновь загруженными файлами.",
 	"This response was generated by \"{{model}}\"": "Этот ответ был сгенерирован для \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/sk-SK/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Neplatný formát súboru.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Neplatný tag",
 	"is typing...": "",
 	"January": "Január",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Vypršanie platnosti JWT (JSON Web Token)",
 	"JWT Token": "JWT Token (JSON Web Token)",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Udržiavať spojenie",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Klávesové skratky",
@@ -1023,7 +1021,6 @@
 	"Rename": "Premenovať",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Režim žiadosti",
 	"Reranking Engine": "",
 	"Reranking Model": "Model na prehodnotenie poradia",
 	"Reset": "režim Reset",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Vývojári stojaci za týmto pluginom sú zapálení dobrovoľníci z komunity. Ak považujete tento plugin za užitočný, zvážte príspevok na jeho vývoj.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hodnotiaca tabuľka je založená na systéme hodnotenia Elo a aktualizuje sa v reálnom čase.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Týmto je zaistené, že vaše cenné konverzácie sú bezpečne uložené vo vašej backendovej databáze. Ďakujeme!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Toto je experimentálna funkcia, nemusí fungovať podľa očakávania a môže byť kedykoľvek zmenená.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Táto voľba odstráni všetky existujúce súbory v kolekcii a nahradí ich novo nahranými súbormi.",
 	"This response was generated by \"{{model}}\"": "Táto odpoveď bola vygenerovaná pomocou \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/sr-RS/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Неисправна ознака",
 	"is typing...": "",
 	"January": "Јануар",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Истек JWT-а",
 	"JWT Token": "JWT жетон",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Одржи трајање",
 	"Keep in Sidebar": "",
 	"Key": "Кључ",
 	"Keyboard shortcuts": "Пречице на тастатури",
@@ -1023,7 +1021,6 @@
 	"Rename": "Преименуј",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Режим захтева",
 	"Reranking Engine": "",
 	"Reranking Model": "Модел поновног рангирања",
 	"Reset": "Поврати",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ово осигурава да су ваши вредни разговори безбедно сачувани у вашој бекенд бази података. Хвала вам!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/sv-SE/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "Ogiltig tagg",
 	"is typing...": "",
 	"January": "januari",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT-utgångsdatum",
 	"JWT Token": "JWT-token",
 	"Kagi Search API Key": "",
-	"Keep Alive": "Keep Alive",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "Tangentbordsgenvägar",
@@ -1023,7 +1021,6 @@
 	"Rename": "Byt namn",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "Frågeläge",
 	"Reranking Engine": "",
 	"Reranking Model": "Reranking modell",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Utvärderingens topplista är baserad på Elo-betygssystemet och uppdateras i realtid",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Detta säkerställer att dina värdefulla samtal sparas säkert till din backend-databas. Tack!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Detta är en experimentell funktion som kanske inte fungerar som förväntat och som kan komma att ändras när som helst.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/th-TH/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "แท็กไม่ถูกต้อง",
 	"is typing...": "",
 	"January": "มกราคม",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "การหมดอายุของ JWT",
 	"JWT Token": "โทเค็น JWT",
 	"Kagi Search API Key": "",
-	"Keep Alive": "คงอยู่",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "ทางลัดแป้นพิมพ์",
@@ -1023,7 +1021,6 @@
 	"Rename": "เปลี่ยนชื่อ",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "โหมดคำขอ",
 	"Reranking Engine": "",
 	"Reranking Model": "จัดอันดับใหม่โมเดล",
 	"Reset": "รีเซ็ต",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "นักพัฒนาที่อยู่เบื้องหลังปลั๊กอินนี้เป็นอาสาสมัครที่มีชื่นชอบการแบ่งบัน หากคุณพบว่าปลั๊กอินนี้มีประโยชน์ โปรดพิจารณาสนับสนุนการพัฒนาของเขา",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "สิ่งนี้ทำให้มั่นใจได้ว่าการสนทนาที่มีค่าของคุณจะถูกบันทึกอย่างปลอดภัยในฐานข้อมูลแบ็กเอนด์ของคุณ ขอบคุณ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "นี่เป็นฟีเจอร์ทดลอง อาจไม่ทำงานตามที่คาดไว้และอาจมีการเปลี่ยนแปลงได้ตลอดเวลา",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/tk-TW/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "",
 	"is typing...": "",
 	"January": "",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "",
 	"JWT Token": "",
 	"Kagi Search API Key": "",
-	"Keep Alive": "",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "",
@@ -1023,7 +1021,6 @@
 	"Rename": "",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "",
 	"Reranking Engine": "",
 	"Reranking Model": "",
 	"Reset": "",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",

+ 3 - 3
src/lib/i18n/locales/tr-TR/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Geçersiz dosya biçimi.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Geçersiz JSON şablonu",
 	"Invalid Tag": "Geçersiz etiket",
 	"is typing...": "yazıyor...",
 	"January": "Ocak",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT Bitişi",
 	"JWT Token": "JWT Token",
 	"Kagi Search API Key": "Kagi Arama API Anahtarı",
-	"Keep Alive": "Canlı Tut",
 	"Keep in Sidebar": "",
 	"Key": "Anahtar",
 	"Keyboard shortcuts": "Klavye kısayolları",
@@ -1023,7 +1021,6 @@
 	"Rename": "Yeniden Adlandır",
 	"Reorder Models": "Modelleri Yeniden Sırala",
 	"Reply in Thread": "Konuya Yanıtla",
-	"Request Mode": "İstek Modu",
 	"Reranking Engine": "",
 	"Reranking Model": "Yeniden Sıralama Modeli",
 	"Reset": "Sıfırla",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Bu eklentinin arkasındaki geliştiriciler topluluktan tutkulu gönüllülerdir. Bu eklentinin yararlı olduğunu düşünüyorsanız, gelişimine katkıda bulunmayı düşünün.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Bu, önemli konuşmalarınızın güvenli bir şekilde arkayüz veritabanınıza kaydedildiğini garantiler. Teşekkür ederiz!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Bu deneysel bir özelliktir, beklendiği gibi çalışmayabilir ve her an değişiklik yapılabilir.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Bu seçenek, koleksiyondaki tüm mevcut dosyaları silecek ve bunları yeni yüklenen dosyalarla değiştirecek.",
 	"This response was generated by \"{{model}}\"": "Bu yanıt \"{{model}}\" tarafından oluşturuldu",

+ 3 - 3
src/lib/i18n/locales/uk-UA/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Неправильний формат файлу.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Невірна схема JSON",
 	"Invalid Tag": "Недійсний тег",
 	"is typing...": "друкує...",
 	"January": "Січень",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "Термін дії JWT",
 	"JWT Token": "Токен JWT",
 	"Kagi Search API Key": "Kagi Search API ключ",
-	"Keep Alive": "Зберегти активність",
 	"Keep in Sidebar": "",
 	"Key": "Ключ",
 	"Keyboard shortcuts": "Клавіатурні скорочення",
@@ -1023,7 +1021,6 @@
 	"Rename": "Переназвати",
 	"Reorder Models": "Переставити моделі",
 	"Reply in Thread": "Відповісти в потоці",
-	"Request Mode": "Режим запиту",
 	"Reranking Engine": "",
 	"Reranking Model": "Модель переранжування",
 	"Reset": "Скидання",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Розмір пакету визначає, скільки текстових запитів обробляється одночасно. Більший розмір пакету може підвищити продуктивність і швидкість моделі, але також вимагає більше пам'яті.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Розробники цього плагіна - пристрасні волонтери зі спільноти. Якщо ви вважаєте цей плагін корисним, будь ласка, зробіть свій внесок у його розвиток.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Таблиця лідерів оцінки базується на системі рейтингу Ело і оновлюється в реальному часі.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-атрибут, який відповідає за пошту, яку користувачі використовують для входу.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "LDAP-атрибут, який відповідає за ім'я користувача, яке використовують користувачі для входу.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Це забезпечує збереження ваших цінних розмов у безпечному бекенд-сховищі. Дякуємо!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Це експериментальна функція, вона може працювати не так, як очікувалося, і може бути змінена в будь-який час.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Ця опція контролює, скільки токенів зберігається при оновленні контексту. Наприклад, якщо встановити значення 2, останні 2 токени контексту розмови будуть збережені. Збереження контексту допомагає підтримувати послідовність розмови, але може зменшити здатність реагувати на нові теми.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Ця опція встановлює максимальну кількість токенів, які модель може згенерувати у своїй відповіді. Збільшення цього ліміту дозволяє моделі надавати довші відповіді, але також може підвищити ймовірність генерації непотрібного або нерелевантного контенту.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Цей варіант видалить усі існуючі файли в колекції та замінить їх новими завантаженими файлами.",
 	"This response was generated by \"{{model}}\"": "Цю відповідь згенеровано за допомогою \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/ur-PK/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "غلط فائل فارمیٹ",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "",
 	"Invalid Tag": "غلط ٹیگ",
 	"is typing...": "",
 	"January": "جنوری",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT کی میعاد ختم ہونا",
 	"JWT Token": "JWT ٹوکن",
 	"Kagi Search API Key": "",
-	"Keep Alive": "زندہ رکھیں",
 	"Keep in Sidebar": "",
 	"Key": "",
 	"Keyboard shortcuts": "کی بورڈ شارٹ کٹس",
@@ -1023,7 +1021,6 @@
 	"Rename": "تبدیل نام کریں",
 	"Reorder Models": "",
 	"Reply in Thread": "",
-	"Request Mode": "درخواست کا موڈ",
 	"Reranking Engine": "",
 	"Reranking Model": "دوبارہ درجہ بندی کا ماڈل",
 	"Reset": "ری سیٹ",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "اس پلگ ان کے پیچھے موجود ڈویلپرز کمیونٹی کے پرجوش رضاکار ہیں اگر آپ کو یہ پلگ ان مددگار لگتا ہے تو برائے مہربانی اس کی ترقی میں اپنا حصہ ڈالنے پر غور کریں",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "تشخیصی لیڈربورڈ ایلو ریٹنگ سسٹم پر مبنی ہے اور یہ حقیقی وقت میں اپ ڈیٹ ہوتا ہے",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
 	"The LDAP attribute that maps to the username that users use to sign in.": "",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "یہ یقینی بناتا ہے کہ آپ کی قیمتی گفتگو محفوظ طریقے سے آپ کے بیک اینڈ ڈیٹا بیس میں محفوظ کی گئی ہیں شکریہ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "یہ ایک تجرباتی خصوصیت ہے، یہ متوقع طور پر کام نہ کر سکتی ہو اور کسی بھی وقت تبدیل کی جا سکتی ہے",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "اس اختیار سے مجموعہ میں موجود تمام فائلز حذف ہو جائیں گی اور ان کی جگہ نئی اپ لوڈ کردہ فائلز لی جائیں گی",
 	"This response was generated by \"{{model}}\"": "یہ جواب \"{{model}}\" کے ذریعہ تیار کیا گیا",

+ 3 - 3
src/lib/i18n/locales/vi-VN/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "",
 	"Invalid file format.": "Định dạng tệp không hợp lệ.",
 	"Invalid JSON file": "",
-	"Invalid JSON schema": "Lược đồ JSON không hợp lệ",
 	"Invalid Tag": "Tag không hợp lệ",
 	"is typing...": "đang gõ...",
 	"January": "Tháng 1",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT Hết hạn",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "Khóa API Kagi Search",
-	"Keep Alive": "Giữ kết nối",
 	"Keep in Sidebar": "",
 	"Key": "Khóa",
 	"Keyboard shortcuts": "Phím tắt",
@@ -1023,7 +1021,6 @@
 	"Rename": "Đổi tên",
 	"Reorder Models": "Sắp xếp lại Mô hình",
 	"Reply in Thread": "Trả lời trong Luồng",
-	"Request Mode": "Chế độ Yêu cầu",
 	"Reranking Engine": "",
 	"Reranking Model": "Reranking Model",
 	"Reset": "Xóa toàn bộ",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Kích thước lô xác định có bao nhiêu yêu cầu văn bản được xử lý cùng một lúc. Kích thước lô cao hơn có thể tăng hiệu suất và tốc độ của mô hình, nhưng nó cũng đòi hỏi nhiều bộ nhớ hơn.",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Các nhà phát triển đằng sau plugin này là những tình nguyện viên nhiệt huyết của cộng đồng. Nếu bạn thấy plugin này hữu ích, vui lòng cân nhắc đóng góp cho sự phát triển của nó.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Bảng xếp hạng đánh giá dựa trên hệ thống xếp hạng Elo và được cập nhật theo thời gian thực.",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Thuộc tính LDAP ánh xạ tới mail mà người dùng sử dụng để đăng nhập.",
 	"The LDAP attribute that maps to the username that users use to sign in.": "Thuộc tính LDAP ánh xạ tới tên người dùng mà người dùng sử dụng để đăng nhập.",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Điều này đảm bảo rằng các nội dung chat có giá trị của bạn được lưu an toàn vào cơ sở dữ liệu backend của bạn. Cảm ơn bạn!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Đây là tính năng thử nghiệm, có thể không hoạt động như mong đợi và có thể thay đổi bất kỳ lúc nào.",
 	"This model is not publicly available. Please select another model.": "",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Tùy chọn này kiểm soát số lượng token được bảo tồn khi làm mới ngữ cảnh. Ví dụ: nếu đặt thành 2, 2 token cuối cùng của ngữ cảnh hội thoại sẽ được giữ lại. Bảo tồn ngữ cảnh có thể giúp duy trì tính liên tục của cuộc trò chuyện, nhưng nó có thể làm giảm khả năng phản hồi các chủ đề mới.",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Tùy chọn này đặt số lượng token tối đa mà mô hình có thể tạo ra trong phản hồi của nó. Tăng giới hạn này cho phép mô hình cung cấp câu trả lời dài hơn, nhưng nó cũng có thể làm tăng khả năng tạo ra nội dung không hữu ích hoặc không liên quan.",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Tùy chọn này sẽ xóa tất cả các tệp hiện có trong bộ sưu tập và thay thế chúng bằng các tệp mới được tải lên.",
 	"This response was generated by \"{{model}}\"": "Phản hồi này được tạo bởi \"{{model}}\"",

+ 3 - 3
src/lib/i18n/locales/zh-CN/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "无效的文件内容",
 	"Invalid file format.": "无效文件格式。",
 	"Invalid JSON file": "无效的 JSON 文件",
-	"Invalid JSON schema": "无效的 JSON schema",
 	"Invalid Tag": "无效标签",
 	"is typing...": "输入中...",
 	"January": "一月",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT 过期",
 	"JWT Token": "JWT 令牌",
 	"Kagi Search API Key": "Kagi 搜索 API 密钥",
-	"Keep Alive": "保持活动",
 	"Keep in Sidebar": "保留在侧边栏",
 	"Key": "密匙",
 	"Keyboard shortcuts": "键盘快捷键",
@@ -1023,7 +1021,6 @@
 	"Rename": "重命名",
 	"Reorder Models": "重新排序模型",
 	"Reply in Thread": "在主题中回复",
-	"Request Mode": "请求模式",
 	"Reranking Engine": "重排序引擎",
 	"Reranking Model": "重排序模型",
 	"Reset": "重置",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "批处理大小决定了一次可以处理多少个文本请求。更高的批处理大小可以提高模型的性能和速度,但也需要更多内存。",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "本插件的开发者是社区中充满热情的志愿者。如果此插件有帮助到您,请考虑为开发贡献一份力量。",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "排行榜基于 Elo 评级系统并实时更新。",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "输入音频的语言。以 ISO-639-1 格式(例如:en)指定输入语言可提高准确性和响应速度。留空则自动检测语言。",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "映射到用户登录时使用的邮箱的 LDAP 属性。",
 	"The LDAP attribute that maps to the username that users use to sign in.": "映射到用户登录时使用的用户名的 LDAP 属性。",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验性功能,可能不会如预期那样工作,而且可能随时发生变化。",
 	"This model is not publicly available. Please select another model.": "此模型未公开。请选择其他模型",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此选项控制刷新上下文时保留多少 Token。例如,如果设置为 2,则将保留对话上下文的最后 2 个 Token。保留上下文有助于保持对话的连续性,但可能会降低响应新主题的能力。",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "此项用于设置模型在其响应中可以生成的最大 Token 数。增加此限制可让模型提供更长的答案,但也可能增加生成无用或不相关内容的可能性。",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "此选项将会删除文件集中所有文件,并用新上传的文件替换。",
 	"This response was generated by \"{{model}}\"": "此回复由 \"{{model}}\" 生成",

+ 3 - 3
src/lib/i18n/locales/zh-TW/translation.json

@@ -718,7 +718,6 @@
 	"Invalid file content": "檔案內容無效",
 	"Invalid file format.": "檔案格式無效。",
 	"Invalid JSON file": "JSON 檔案無效",
-	"Invalid JSON schema": "JSON Schema 無效",
 	"Invalid Tag": "無效標籤",
 	"is typing...": "正在輸入...",
 	"January": "1 月",
@@ -733,7 +732,6 @@
 	"JWT Expiration": "JWT 過期時間",
 	"JWT Token": "JWT Token",
 	"Kagi Search API Key": "Kagi 搜尋 API 金鑰",
-	"Keep Alive": "保持連線",
 	"Keep in Sidebar": "",
 	"Key": "金鑰",
 	"Keyboard shortcuts": "鍵盤快捷鍵",
@@ -1023,7 +1021,6 @@
 	"Rename": "重新命名",
 	"Reorder Models": "重新排序模型",
 	"Reply in Thread": "在討論串中回覆",
-	"Request Mode": "請求模式",
 	"Reranking Engine": "重新排序引擎",
 	"Reranking Model": "重新排序模型",
 	"Reset": "重設",
@@ -1195,6 +1192,7 @@
 	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "批次大小決定一次處理多少文字請求。較高的批次大小可以提高模型的效能和速度,但也需要更多記憶體。",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "這個外掛背後的開發者是來自社群的熱情志願者。如果您覺得這個外掛很有幫助,請考慮為其開發做出貢獻。",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "評估排行榜基於 Elo 評分系統,並即時更新。",
+	"The format to return a response in. Format can be json or a JSON schema.": "",
 	"The language of the input audio. Supplying the input language in ISO-639-1 (e.g. en) format will improve accuracy and latency. Leave blank to automatically detect the language.": "輸入音訊的語言。以 ISO-639-1 格式(例如:en)提供輸入語言將提高準確性和減少延遲。留空則自動偵測語言。",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "對映到使用者用於登入的使用者郵箱的 LDAP 屬性。",
 	"The LDAP attribute that maps to the username that users use to sign in.": "對映到使用者用於登入的使用者名稱的 LDAP 屬性。",
@@ -1212,7 +1210,9 @@
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "這確保您寶貴的對話會安全地儲存到您的後端資料庫。謝謝!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "這是一個實驗性功能,它可能無法如預期運作,並且可能會隨時變更。",
 	"This model is not publicly available. Please select another model.": "此模型未開放公眾使用,請選擇其他模型。",
+	"This option controls how long the model will stay loaded into memory following the request (default: 5m)": "",
 	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此選項控制在重新整理上下文時保留多少 token。例如,如果設定為 2,則會保留對話上下文的最後 2 個 token。保留上下文有助於保持對話的連貫性,但也可能降低對新主題的回應能力。",
+	"This option enables or disables the use of the reasoning feature in Ollama, which allows the model to think before generating a response. When enabled, the model can take a moment to process the conversation context and generate a more thoughtful response.": "",
 	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "此選項設定模型在其回應中可以生成的最大 token 數量。增加此限制允許模型提供更長的答案,但也可能增加產生無用或不相關內容的可能性。",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "此選項將刪除集合中的所有現有檔案,並用新上傳的檔案取代它們。",
 	"This response was generated by \"{{model}}\"": "此回應由「{{model}}」產生",