@@ -18,11 +18,17 @@ from open_webui.models.files import Files
 from open_webui.retrieval.vector.main import GetResult
+
 from open_webui.env import (
     SRC_LOG_LEVELS,
     OFFLINE_MODE,
     ENABLE_FORWARD_USER_INFO_HEADERS,
 )
+from open_webui.config import (
+    RAG_EMBEDDING_QUERY_PREFIX,
+    RAG_EMBEDDING_PASSAGE_PREFIX,
+    RAG_EMBEDDING_PREFIX_FIELD_NAME,
+)

 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
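For context, the three names imported from `open_webui.config` are not defined anywhere in this diff. A minimal sketch of how such entries might be sourced from the environment (names taken from the import; defaults and example values are illustrative only, e.g. the `query: `/`passage: ` markers used by E5-style models):

```python
# Hypothetical sketch, not part of the patch: plausible definitions for the
# three config values this diff imports. Values shown are illustrative.
import os

# Prepended to search queries (e.g. "query: " for E5-style models).
RAG_EMBEDDING_QUERY_PREFIX = os.environ.get("RAG_EMBEDDING_QUERY_PREFIX")
# Prepended to indexed passages (e.g. "passage: ").
RAG_EMBEDDING_PASSAGE_PREFIX = os.environ.get("RAG_EMBEDDING_PASSAGE_PREFIX")
# When set, the prefix is sent as a dedicated JSON field of this name
# instead of being prepended to the input text.
RAG_EMBEDDING_PREFIX_FIELD_NAME = os.environ.get("RAG_EMBEDDING_PREFIX_FIELD_NAME")
```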
@@ -47,7 +53,7 @@ class VectorSearchRetriever(BaseRetriever):
     ) -> list[Document]:
         result = VECTOR_DB_CLIENT.search(
             collection_name=self.collection_name,
-            vectors=[self.embedding_function(query)],
+            vectors=[self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)],
             limit=self.top_k,
         )

@@ -250,7 +256,7 @@ def query_collection(
 ) -> dict:
     results = []
     for query in queries:
-        query_embedding = embedding_function(query)
+        query_embedding = embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)
         for collection_name in collection_names:
             if collection_name:
                 try:
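Both call sites above prefix only the query side; the passage side is handled once, at indexing time. A toy illustration of the convention (prefix value assumed):

```python
# Toy illustration, assuming RAG_EMBEDDING_QUERY_PREFIX = "query: ": only the
# user's question is prefixed at retrieval time, because stored chunks were
# already embedded with the passage prefix when they were indexed.
RAG_EMBEDDING_QUERY_PREFIX = "query: "  # assumed value

def embedding_function(text: str, prefix: str) -> str:
    # stand-in for the real embedder; shows the text it would receive
    return f"{prefix}{text}"

print(embedding_function("how do I reset my password?", RAG_EMBEDDING_QUERY_PREFIX))
# -> "query: how do I reset my password?"
```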
@@ -328,33 +334,33 @@ def get_embedding_function(
     embedding_batch_size,
 ):
     if embedding_engine == "":
-        return lambda query, user=None: embedding_function.encode(query).tolist()
+        return lambda query, prefix, user=None: embedding_function.encode(query, prompt=prefix if prefix else None).tolist()
     elif embedding_engine in ["ollama", "openai"]:
-        func = lambda query, user=None: generate_embeddings(
+        func = lambda query, prefix, user=None: generate_embeddings(
             engine=embedding_engine,
             model=embedding_model,
             text=query,
+            prefix=prefix,
             url=url,
             key=key,
             user=user,
         )
-
-        def generate_multiple(query, user, func):
+        def generate_multiple(query, prefix, user, func):
             if isinstance(query, list):
                 embeddings = []
                 for i in range(0, len(query), embedding_batch_size):
                     embeddings.extend(
-                        func(query[i : i + embedding_batch_size], user=user)
+                        func(query[i : i + embedding_batch_size], prefix=prefix, user=user)
                     )
                 return embeddings
             else:
-                return func(query, user)
-
-        return lambda query, user=None: generate_multiple(query, user, func)
+                return func(query, prefix, user)
+        return lambda query, prefix, user=None: generate_multiple(query, prefix, user, func)
     else:
         raise ValueError(f"Unknown embedding engine: {embedding_engine}")


+
 def get_sources_from_files(
     request,
     files,
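With this change, the callable returned by `get_embedding_function` takes the prefix as a second positional argument. A hedged usage sketch; only `embedding_batch_size` is visible in the hunk above, the remaining parameter names are inferred from the call sites:

```python
# Usage sketch under assumed parameter names; values are illustrative.
embed = get_embedding_function(
    embedding_engine="openai",
    embedding_model="text-embedding-3-small",
    embedding_function=None,  # only used when embedding_engine == ""
    url="https://api.openai.com/v1",
    key="sk-...",
    embedding_batch_size=32,
)

query_vector = embed("what is retrieval-augmented generation?", RAG_EMBEDDING_QUERY_PREFIX)
chunk_vectors = embed(["chunk one", "chunk two"], RAG_EMBEDDING_PASSAGE_PREFIX)
```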
@@ -572,9 +578,17 @@ def generate_openai_batch_embeddings(
     texts: list[str],
     url: str = "https://api.openai.com/v1",
     key: str = "",
-    user: UserModel = None,
+    prefix: Optional[str] = None,
+    user: UserModel = None,
 ) -> Optional[list[list[float]]]:
     try:
+        json_data = {
+            "input": texts,
+            "model": model,
+        }
+        if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str):
+            json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix
+
         r = requests.post(
             f"{url}/embeddings",
             headers={
@@ -591,7 +605,7 @@ def generate_openai_batch_embeddings(
                     else {}
                 ),
             },
-            json={"input": texts, "model": model},
+            json=json_data,
         )
         r.raise_for_status()
         data = r.json()
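The effect on the request body, with illustrative values: when `RAG_EMBEDDING_PREFIX_FIELD_NAME` is unset, the prefix is prepended to the text by `generate_embeddings` below and the payload shape is unchanged; when it names a field, the texts stay bare and the prefix travels separately. The field name `"input_type"` here is an assumption (a convention some embedding APIs use), not something this patch defines:

```python
# Illustrative payloads (values assumed, not from the patch).
# No field name configured -> prefix already prepended, body shape unchanged:
{"input": ["query: what is RAG?"], "model": "text-embedding-3-small"}

# With RAG_EMBEDDING_PREFIX_FIELD_NAME = "input_type" and prefix = "query":
{
    "input": ["what is RAG?"],
    "model": "text-embedding-3-small",
    "input_type": "query",
}
```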
@@ -605,9 +619,21 @@


 def generate_ollama_batch_embeddings(
-    model: str, texts: list[str], url: str, key: str = "", user: UserModel = None
+    model: str,
+    texts: list[str],
+    url: str,
+    key: str = "",
+    prefix: Optional[str] = None,
+    user: UserModel = None,
 ) -> Optional[list[list[float]]]:
     try:
+        json_data = {
+            "input": texts,
+            "model": model,
+        }
+        if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str):
+            json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix
+
         r = requests.post(
             f"{url}/api/embed",
             headers={
@@ -624,7 +650,7 @@ def generate_ollama_batch_embeddings(
                     else {}
                 ),
             },
-            json={"input": texts, "model": model},
+            json=json_data,
         )
         r.raise_for_status()
         data = r.json()
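Same pattern for Ollama. Note that Ollama's `/api/embed` does not define a standard prefix field, so in a stock setup `RAG_EMBEDDING_PREFIX_FIELD_NAME` would typically stay unset and the prepend fallback in `generate_embeddings` applies; the field injection is there for compatible proxies or custom servers. A hedged sketch of the resulting request:

```python
# Sketch of the call this produces with the fallback path (prefix already
# prepended to the text); endpoint and keys are as in the patch.
import requests

json_data = {"input": ["query: what is RAG?"], "model": "nomic-embed-text"}
r = requests.post("http://localhost:11434/api/embed", json=json_data)
r.raise_for_status()
embeddings = r.json()["embeddings"]
```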
@@ -638,33 +664,32 @@ def generate_ollama_batch_embeddings(
     return None


-def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], **kwargs):
+def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], prefix: Optional[str] = None, **kwargs):
     url = kwargs.get("url", "")
     key = kwargs.get("key", "")
     user = kwargs.get("user")

+    if prefix is not None and RAG_EMBEDDING_PREFIX_FIELD_NAME is None:
+        if isinstance(text, list):
+            text = [f"{prefix}{text_element}" for text_element in text]
+        else:
+            text = f"{prefix}{text}"
+
     if engine == "ollama":
         if isinstance(text, list):
             embeddings = generate_ollama_batch_embeddings(
-                **{"model": model, "texts": text, "url": url, "key": key, "user": user}
+                **{"model": model, "texts": text, "url": url, "key": key, "prefix": prefix, "user": user}
             )
         else:
             embeddings = generate_ollama_batch_embeddings(
-                **{
-                    "model": model,
-                    "texts": [text],
-                    "url": url,
-                    "key": key,
-                    "user": user,
-                }
+                **{"model": model, "texts": [text], "url": url, "key": key, "prefix": prefix, "user": user}
             )
         return embeddings[0] if isinstance(text, str) else embeddings
     elif engine == "openai":
         if isinstance(text, list):
-            embeddings = generate_openai_batch_embeddings(model, text, url, key, user)
+            embeddings = generate_openai_batch_embeddings(model, text, url, key, prefix, user)
         else:
-            embeddings = generate_openai_batch_embeddings(model, [text], url, key, user)
-
+            embeddings = generate_openai_batch_embeddings(model, [text], url, key, prefix, user)
         return embeddings[0] if isinstance(text, str) else embeddings
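When no dedicated field is configured, `generate_embeddings` falls back to plain string concatenation before dispatching to either backend. The fallback branch, isolated as a runnable snippet:

```python
# Isolated sketch of the fallback above: prefix is prepended to each input
# string when RAG_EMBEDDING_PREFIX_FIELD_NAME is None.
prefix = "query: "
text = ["alpha", "beta"]

if isinstance(text, list):
    text = [f"{prefix}{t}" for t in text]
else:
    text = f"{prefix}{text}"

print(text)  # ['query: alpha', 'query: beta']
```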
@@ -700,9 +725,10 @@ class RerankCompressor(BaseDocumentCompressor):
         else:
             from sentence_transformers import util

-            query_embedding = self.embedding_function(query)
+            query_embedding = self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)
             document_embedding = self.embedding_function(
-                [doc.page_content for doc in documents]
+                [doc.page_content for doc in documents],
+                RAG_EMBEDDING_PASSAGE_PREFIX,
             )
             scores = util.cos_sim(query_embedding, document_embedding)[0]
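In the cosine-similarity rerank fallback, queries and documents are now embedded with their respective prefixes. That matters for asymmetric embedding models; a small self-contained demo (the model choice is illustrative, not something this patch prescribes):

```python
# Demo with an E5-style asymmetric model (illustrative choice); such models
# are trained to see distinct "query: " and "passage: " markers per side.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("intfloat/e5-small-v2")
query_embedding = model.encode("query: how do I reset my password?")
document_embedding = model.encode(
    [
        "passage: Open Settings > Account > Reset password.",
        "passage: Our office is closed on public holidays.",
    ]
)
print(util.cos_sim(query_embedding, document_embedding)[0])
```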