@@ -19,7 +19,9 @@ from open_webui.apps.audio.main import app as audio_app
 from open_webui.apps.images.main import app as images_app
 from open_webui.apps.ollama.main import app as ollama_app
 from open_webui.apps.ollama.main import (
-    generate_openai_chat_completion as generate_ollama_chat_completion,
+    GenerateChatCompletionForm,
+    generate_chat_completion as generate_ollama_chat_completion,
+    generate_openai_chat_completion as generate_ollama_openai_chat_completion,
 )
 from open_webui.apps.ollama.main import get_all_models as get_ollama_models
 from open_webui.apps.openai.main import app as openai_app
@@ -135,6 +137,12 @@ from open_webui.utils.utils import (
 )
 from open_webui.utils.webhook import post_webhook
 
+from open_webui.utils.payload import convert_payload_openai_to_ollama
+from open_webui.utils.response import (
+    convert_response_ollama_to_openai,
+    convert_streaming_response_ollama_to_openai,
+)
+
 if SAFE_MODE:
     print("SAFE MODE ENABLED")
     Functions.deactivate_all_functions()
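
Note: convert_payload_openai_to_ollama (imported above) reshapes an OpenAI-style request body into Ollama's /api/chat schema. Below is a minimal sketch of that kind of mapping, assuming the standard OpenAI field names and Ollama's documented "options" block; the actual handling lives in open_webui/utils/payload.py and may cover more fields.

# Illustrative sketch only -- the real logic lives in open_webui/utils/payload.py.
def convert_payload_openai_to_ollama_sketch(openai_payload: dict) -> dict:
    """Map an OpenAI-style chat payload onto Ollama's /api/chat schema."""
    ollama_payload = {
        "model": openai_payload["model"],
        "messages": openai_payload["messages"],
        "stream": openai_payload.get("stream", False),
    }
    # Flat OpenAI sampling parameters move under Ollama's "options" dict;
    # max_tokens, for example, corresponds to Ollama's num_predict.
    options = {}
    if "max_tokens" in openai_payload:
        options["num_predict"] = openai_payload["max_tokens"]
    for key in ("temperature", "top_p", "seed", "stop"):
        if key in openai_payload:
            options[key] = openai_payload[key]
    if options:
        ollama_payload["options"] = options
    return ollama_payload
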
@@ -1048,7 +1056,18 @@ async def generate_chat_completions(form_data: dict, user=Depends(get_verified_u
     if model.get("pipe"):
         return await generate_function_chat_completion(form_data, user=user)
     if model["owned_by"] == "ollama":
-        return await generate_ollama_chat_completion(form_data, user=user)
+        # Using /ollama/api/chat endpoint
+        form_data = convert_payload_openai_to_ollama(form_data)
+        form_data = GenerateChatCompletionForm(**form_data)
+        response = await generate_ollama_chat_completion(form_data=form_data, user=user)
+        if form_data.stream:
+            response.headers["content-type"] = "text/event-stream"
+            return StreamingResponse(
+                convert_streaming_response_ollama_to_openai(response),
+                headers=dict(response.headers),
+            )
+        else:
+            return convert_response_ollama_to_openai(response)
     else:
         return await generate_openai_chat_completion(form_data, user=user)
 
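
In the streaming branch above, convert_streaming_response_ollama_to_openai re-emits Ollama's NDJSON chat chunks as OpenAI-style server-sent events. A rough sketch of such a generator follows, assuming each stream line is a JSON chunk carrying a message.content delta and a done flag, and that the response exposes a Starlette-style body_iterator; the real implementation is in open_webui/utils/response.py.

import json

# Illustrative sketch only -- the real generator lives in open_webui/utils/response.py.
async def convert_streaming_ollama_to_openai_sketch(response):
    """Re-emit Ollama's NDJSON chat chunks as OpenAI-style SSE events."""
    async for line in response.body_iterator:
        chunk = json.loads(line)
        data = {
            "object": "chat.completion.chunk",
            "model": chunk.get("model"),
            "choices": [
                {
                    "index": 0,
                    "delta": {"content": chunk.get("message", {}).get("content", "")},
                    "finish_reason": "stop" if chunk.get("done") else None,
                }
            ],
        }
        yield f"data: {json.dumps(data)}\n\n"
    yield "data: [DONE]\n\n"
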
@@ -1399,9 +1418,10 @@ async def generate_title(form_data: dict, user=Depends(get_verified_user)):
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
     task_model_id = get_task_model_id(model_id)
-
     print(task_model_id)
 
+    model = app.state.MODELS[task_model_id]
+
     if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
     else:
@@ -1440,9 +1460,9 @@ Prompt: {{prompt:middletruncate:8000}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.TITLE_GENERATION)},
     }
-
     log.debug(payload)
 
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1456,7 +1476,6 @@ Prompt: {{prompt:middletruncate:8000}}"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
         )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 
@@ -1484,6 +1503,8 @@ async def generate_search_query(form_data: dict, user=Depends(get_verified_user)
     task_model_id = get_task_model_id(model_id)
     print(task_model_id)
 
+    model = app.state.MODELS[task_model_id]
+
     if app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
     else:
@@ -1516,9 +1537,9 @@ Search Query:"""
         ),
         "metadata": {"task": str(TASKS.QUERY_GENERATION)},
     }
+    log.debug(payload)
 
-    print(payload)
-
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1532,7 +1553,6 @@ Search Query:"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
         )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 
@@ -1555,12 +1575,13 @@ async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
     task_model_id = get_task_model_id(model_id)
     print(task_model_id)
 
+    model = app.state.MODELS[task_model_id]
+
     template = '''
 Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
 
 Message: """{{prompt}}"""
 '''
-
     content = title_generation_template(
         template,
         form_data["prompt"],
@@ -1584,9 +1605,9 @@ Message: """{{prompt}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.EMOJI_GENERATION)},
     }
-
     log.debug(payload)
 
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1600,7 +1621,6 @@ Message: """{{prompt}}"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
         )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 
@@ -1620,8 +1640,10 @@ async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)
 
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
-    model_id = get_task_model_id(model_id)
-    print(model_id)
+    task_model_id = get_task_model_id(model_id)
+    print(task_model_id)
+
+    model = app.state.MODELS[task_model_id]
 
     template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"
 
@@ -1636,13 +1658,12 @@ Responses from models: {{responses}}"""
     )
 
     payload = {
-        "model": model_id,
+        "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": form_data.get("stream", False),
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.MOA_RESPONSE_GENERATION)},
     }
-
     log.debug(payload)
 
     try:
@@ -1658,7 +1679,6 @@ Responses from models: {{responses}}"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
         )
-
     if "chat_id" in payload:
         del payload["chat_id"]
 
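
For the non-streaming branch, convert_response_ollama_to_openai wraps Ollama's single /api/chat reply in an OpenAI chat.completion envelope. A hypothetical sketch of the shape involved is below; the id and usage fields are illustrative (Ollama's prompt_eval_count/eval_count map naturally onto OpenAI's token counts), and the real mapping lives in open_webui/utils/response.py.

import time
import uuid

# Illustrative sketch only -- the real helper lives in open_webui/utils/response.py.
def convert_response_ollama_to_openai_sketch(ollama_response: dict) -> dict:
    """Wrap a non-streaming Ollama /api/chat reply in OpenAI chat.completion form."""
    prompt_tokens = ollama_response.get("prompt_eval_count", 0)
    completion_tokens = ollama_response.get("eval_count", 0)
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": ollama_response["model"],
        "choices": [
            {
                "index": 0,
                # Ollama already returns {"role": "assistant", "content": ...}
                "message": ollama_response["message"],
                "finish_reason": "stop" if ollama_response.get("done") else None,
            }
        ],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        },
    }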