utils.py

import logging
import os
import hashlib
from typing import Optional, Union

import requests
from huggingface_hub import snapshot_download
from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever
from langchain_community.retrievers import BM25Retriever
from langchain_core.documents import Document

from open_webui.config import (
    VECTOR_DB,
    RAG_EMBEDDING_QUERY_PREFIX,
    RAG_EMBEDDING_PASSAGE_PREFIX,
    RAG_EMBEDDING_PREFIX_FIELD_NAME,
)
from open_webui.retrieval.vector.connector import VECTOR_DB_CLIENT
from open_webui.models.users import UserModel
from open_webui.models.files import Files
from open_webui.retrieval.vector.main import GetResult
from open_webui.env import (
    SRC_LOG_LEVELS,
    OFFLINE_MODE,
    ENABLE_FORWARD_USER_INFO_HEADERS,
)

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])
from typing import Any

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.retrievers import BaseRetriever


class VectorSearchRetriever(BaseRetriever):
    collection_name: Any
    embedding_function: Any
    top_k: int

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> list[Document]:
        result = VECTOR_DB_CLIENT.search(
            collection_name=self.collection_name,
            vectors=[self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)],
            limit=self.top_k,
        )

        ids = result.ids[0]
        metadatas = result.metadatas[0]
        documents = result.documents[0]

        results = []
        for idx in range(len(ids)):
            results.append(
                Document(
                    metadata=metadatas[idx],
                    page_content=documents[idx],
                )
            )
        return results
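
# A minimal usage sketch (the collection name and embedding function below are
# hypothetical, not part of this module): the retriever plugs into any
# LangChain pipeline that accepts a BaseRetriever.
#
#     retriever = VectorSearchRetriever(
#         collection_name="file-abc123",
#         embedding_function=embedding_function,
#         top_k=5,
#     )
#     docs = retriever.invoke("What does the contract say about renewal?")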

def query_doc(
    collection_name: str, query_embedding: list[float], k: int, user: UserModel = None
):
    try:
        result = VECTOR_DB_CLIENT.search(
            collection_name=collection_name,
            vectors=[query_embedding],
            limit=k,
        )
        if result:
            log.info(f"query_doc:result {result.ids} {result.metadatas}")

        return result
    except Exception as e:
        log.exception(f"Error querying doc {collection_name} with limit {k}: {e}")
        raise e


def get_doc(collection_name: str, user: UserModel = None):
    try:
        result = VECTOR_DB_CLIENT.get(collection_name=collection_name)
        if result:
            log.info(f"get_doc:result {result.ids} {result.metadatas}")

        return result
    except Exception as e:
        log.exception(f"Error getting doc {collection_name}: {e}")
        raise e

def query_doc_with_hybrid_search(
    collection_name: str,
    collection_result: GetResult,
    query: str,
    embedding_function,
    k: int,
    reranking_function,
    k_reranker: int,
    r: float,
) -> dict:
    try:
        bm25_retriever = BM25Retriever.from_texts(
            texts=collection_result.documents[0],
            metadatas=collection_result.metadatas[0],
        )
        bm25_retriever.k = k

        vector_search_retriever = VectorSearchRetriever(
            collection_name=collection_name,
            embedding_function=embedding_function,
            top_k=k,
        )

        ensemble_retriever = EnsembleRetriever(
            retrievers=[bm25_retriever, vector_search_retriever], weights=[0.5, 0.5]
        )
        compressor = RerankCompressor(
            embedding_function=embedding_function,
            top_n=k_reranker,
            reranking_function=reranking_function,
            r_score=r,
        )

        compression_retriever = ContextualCompressionRetriever(
            base_compressor=compressor, base_retriever=ensemble_retriever
        )

        result = compression_retriever.invoke(query)

        distances = [d.metadata.get("score") for d in result]
        documents = [d.page_content for d in result]
        metadatas = [d.metadata for d in result]

        # The reranker may return up to k_reranker items; if k < k_reranker,
        # sort by score and keep only the top k. (The zip and unpack orders
        # must match: distance, document, metadata.)
        if k < k_reranker:
            sorted_items = sorted(
                zip(distances, documents, metadatas), key=lambda x: x[0], reverse=True
            )
            sorted_items = sorted_items[:k]
            if sorted_items:
                distances, documents, metadatas = map(list, zip(*sorted_items))

        result = {
            "distances": [distances],
            "documents": [documents],
            "metadatas": [metadatas],
        }

        log.info(
            "query_doc_with_hybrid_search:result "
            + f'{result["metadatas"]} {result["distances"]}'
        )
        return result
    except Exception as e:
        log.exception(f"Error querying doc {collection_name} with hybrid search: {e}")
        raise e
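
# A minimal sketch of the hybrid pipeline this function builds (the inputs
# below are hypothetical): BM25 and vector retrieval run as a 50/50 ensemble,
# then the RerankCompressor rescores and trims the merged candidates.
#
#     collection_result = VECTOR_DB_CLIENT.get(collection_name="file-abc123")
#     result = query_doc_with_hybrid_search(
#         collection_name="file-abc123",
#         collection_result=collection_result,
#         query="renewal terms",
#         embedding_function=embedding_function,
#         k=5,
#         reranking_function=reranking_function,
#         k_reranker=10,
#         r=0.0,
#     )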

def merge_get_results(get_results: list[dict]) -> dict:
    # Initialize lists to store combined data
    combined_documents = []
    combined_metadatas = []
    combined_ids = []

    for data in get_results:
        combined_documents.extend(data["documents"][0])
        combined_metadatas.extend(data["metadatas"][0])
        combined_ids.extend(data["ids"][0])

    # Create the output dictionary
    result = {
        "documents": [combined_documents],
        "metadatas": [combined_metadatas],
        "ids": [combined_ids],
    }
    return result

def merge_and_sort_query_results(query_results: list[dict], k: int) -> dict:
    # Deduplicate documents across result sets, keeping the best distance per document
    combined = dict()  # Maps document hash -> (distance, document, metadata)

    for data in query_results:
        distances = data["distances"][0]
        documents = data["documents"][0]
        metadatas = data["metadatas"][0]

        for distance, document, metadata in zip(distances, documents, metadatas):
            if isinstance(document, str):
                doc_hash = hashlib.md5(
                    document.encode()
                ).hexdigest()  # Compute a hash for uniqueness

                if doc_hash not in combined:
                    combined[doc_hash] = (distance, document, metadata)
                    continue  # if doc is new, no further comparison is needed

                # if doc is already in, but the new distance is better, update
                if distance > combined[doc_hash][0]:
                    combined[doc_hash] = (distance, document, metadata)

    combined = list(combined.values())
    # Sort the list based on distances (higher is better)
    combined.sort(key=lambda x: x[0], reverse=True)

    # Slice to keep only the top k elements
    sorted_distances, sorted_documents, sorted_metadatas = (
        zip(*combined[:k]) if combined else ([], [], [])
    )

    # Create and return the output dictionary
    return {
        "distances": [list(sorted_distances)],
        "documents": [list(sorted_documents)],
        "metadatas": [list(sorted_metadatas)],
    }
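
# A worked example of the merge (illustrative values only): two result sets
# share the document "foo"; the higher score wins, and results are sorted
# descending before the top-k cut.
#
#     merge_and_sort_query_results(
#         [
#             {"distances": [[0.9, 0.4]], "documents": [["foo", "bar"]],
#              "metadatas": [[{}, {}]]},
#             {"distances": [[0.7]], "documents": [["foo"]], "metadatas": [[{}]]},
#         ],
#         k=2,
#     )
#     # -> {"distances": [[0.9, 0.4]], "documents": [["foo", "bar"]],
#     #     "metadatas": [[{}, {}]]}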

def get_all_items_from_collections(collection_names: list[str]) -> dict:
    results = []

    for collection_name in collection_names:
        if collection_name:
            try:
                result = get_doc(collection_name=collection_name)
                if result is not None:
                    results.append(result.model_dump())
            except Exception as e:
                log.exception(f"Error when querying the collection: {e}")

    return merge_get_results(results)

def query_collection(
    collection_names: list[str],
    queries: list[str],
    embedding_function,
    k: int,
) -> dict:
    results = []

    for query in queries:
        query_embedding = embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)
        for collection_name in collection_names:
            if collection_name:
                try:
                    result = query_doc(
                        collection_name=collection_name,
                        k=k,
                        query_embedding=query_embedding,
                    )
                    if result is not None:
                        results.append(result.model_dump())
                except Exception as e:
                    log.exception(f"Error when querying the collection: {e}")

    return merge_and_sort_query_results(results, k=k)

def query_collection_with_hybrid_search(
    collection_names: list[str],
    queries: list[str],
    embedding_function,
    k: int,
    reranking_function,
    k_reranker: int,
    r: float,
) -> dict:
    results = []
    error = False

    # Fetch collection data once per collection sequentially
    # to avoid fetching the same data multiple times later.
    collection_results = {}
    for collection_name in collection_names:
        try:
            collection_results[collection_name] = VECTOR_DB_CLIENT.get(
                collection_name=collection_name
            )
        except Exception as e:
            log.exception(f"Failed to fetch collection {collection_name}: {e}")
            collection_results[collection_name] = None

    for collection_name in collection_names:
        # Skip collections whose data could not be fetched above
        if collection_results[collection_name] is None:
            error = True
            continue

        try:
            for query in queries:
                result = query_doc_with_hybrid_search(
                    collection_name=collection_name,
                    collection_result=collection_results[collection_name],
                    query=query,
                    embedding_function=embedding_function,
                    k=k,
                    reranking_function=reranking_function,
                    k_reranker=k_reranker,
                    r=r,
                )
                results.append(result)
        except Exception as e:
            log.exception(f"Error when querying the collection with hybrid search: {e}")
            error = True

    # Only raise when every collection failed, matching the message below;
    # the caller catches this and falls back to non-hybrid search.
    if error and not results:
        raise Exception(
            "Hybrid search failed for all collections. Using non-hybrid search as fallback."
        )

    return merge_and_sort_query_results(results, k=k)

def get_embedding_function(
    embedding_engine,
    embedding_model,
    embedding_function,
    url,
    key,
    embedding_batch_size,
):
    if embedding_engine == "":
        return lambda query, prefix, user=None: embedding_function.encode(
            query, prompt=prefix if prefix else None
        ).tolist()
    elif embedding_engine in ["ollama", "openai"]:
        func = lambda query, prefix, user=None: generate_embeddings(
            engine=embedding_engine,
            model=embedding_model,
            text=query,
            prefix=prefix,
            url=url,
            key=key,
            user=user,
        )

        def generate_multiple(query, prefix, user, func):
            if isinstance(query, list):
                embeddings = []
                for i in range(0, len(query), embedding_batch_size):
                    embeddings.extend(
                        func(query[i : i + embedding_batch_size], prefix=prefix, user=user)
                    )
                return embeddings
            else:
                return func(query, prefix, user)

        return lambda query, prefix, user=None: generate_multiple(
            query, prefix, user, func
        )
    else:
        raise ValueError(f"Unknown embedding engine: {embedding_engine}")
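
# A minimal sketch of the returned callable (endpoint, key, and model names
# below are hypothetical): the same signature covers single strings and lists,
# with batching handled transparently by generate_multiple.
#
#     embed = get_embedding_function(
#         embedding_engine="openai",
#         embedding_model="text-embedding-3-small",
#         embedding_function=None,  # only used by the local ("") engine
#         url="https://api.openai.com/v1",
#         key="sk-...",
#         embedding_batch_size=32,
#     )
#     vector = embed("hello", RAG_EMBEDDING_QUERY_PREFIX)
#     vectors = embed(["a", "b", "c"], RAG_EMBEDDING_PASSAGE_PREFIX)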

def get_sources_from_files(
    request,
    files,
    queries,
    embedding_function,
    k,
    reranking_function,
    k_reranker,
    r,
    hybrid_search,
    full_context=False,
):
    log.debug(
        f"files: {files} {queries} {embedding_function} {reranking_function} {full_context}"
    )

    extracted_collections = []
    relevant_contexts = []

    for file in files:
        context = None
        if file.get("docs"):
            # BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
            context = {
                "documents": [[doc.get("content") for doc in file.get("docs")]],
                "metadatas": [[doc.get("metadata") for doc in file.get("docs")]],
            }
        elif file.get("context") == "full":
            # Manual Full Mode Toggle
            context = {
                "documents": [[file.get("file").get("data", {}).get("content")]],
                "metadatas": [[{"file_id": file.get("id"), "name": file.get("name")}]],
            }
        elif (
            file.get("type") != "web_search"
            and request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL
        ):
            # BYPASS_EMBEDDING_AND_RETRIEVAL
            if file.get("type") == "collection":
                file_ids = file.get("data", {}).get("file_ids", [])

                documents = []
                metadatas = []
                for file_id in file_ids:
                    file_object = Files.get_file_by_id(file_id)

                    if file_object:
                        documents.append(file_object.data.get("content", ""))
                        metadatas.append(
                            {
                                "file_id": file_id,
                                "name": file_object.filename,
                                "source": file_object.filename,
                            }
                        )

                context = {
                    "documents": [documents],
                    "metadatas": [metadatas],
                }
            elif file.get("id"):
                file_object = Files.get_file_by_id(file.get("id"))
                if file_object:
                    context = {
                        "documents": [[file_object.data.get("content", "")]],
                        "metadatas": [
                            [
                                {
                                    "file_id": file.get("id"),
                                    "name": file_object.filename,
                                    "source": file_object.filename,
                                }
                            ]
                        ],
                    }
            elif file.get("file").get("data"):
                context = {
                    "documents": [[file.get("file").get("data", {}).get("content")]],
                    "metadatas": [
                        [file.get("file").get("data", {}).get("metadata", {})]
                    ],
                }
        else:
            collection_names = []
            if file.get("type") == "collection":
                if file.get("legacy"):
                    collection_names = file.get("collection_names", [])
                else:
                    collection_names.append(file["id"])
            elif file.get("collection_name"):
                collection_names.append(file["collection_name"])
            elif file.get("id"):
                if file.get("legacy"):
                    collection_names.append(f"{file['id']}")
                else:
                    collection_names.append(f"file-{file['id']}")

            collection_names = set(collection_names).difference(extracted_collections)
            if not collection_names:
                log.debug(f"skipping {file} as it has already been extracted")
                continue

            if full_context:
                try:
                    context = get_all_items_from_collections(collection_names)
                except Exception as e:
                    log.exception(e)
            else:
                try:
                    context = None
                    if file.get("type") == "text":
                        context = file["content"]
                    else:
                        if hybrid_search:
                            try:
                                context = query_collection_with_hybrid_search(
                                    collection_names=collection_names,
                                    queries=queries,
                                    embedding_function=embedding_function,
                                    k=k,
                                    reranking_function=reranking_function,
                                    k_reranker=k_reranker,
                                    r=r,
                                )
                            except Exception as e:
                                log.debug(
                                    f"Error when using hybrid search, "
                                    f"falling back to non-hybrid search: {e}"
                                )

                        if (not hybrid_search) or (context is None):
                            context = query_collection(
                                collection_names=collection_names,
                                queries=queries,
                                embedding_function=embedding_function,
                                k=k,
                            )
                except Exception as e:
                    log.exception(e)

            extracted_collections.extend(collection_names)

        if context:
            if "data" in file:
                del file["data"]

            relevant_contexts.append({**context, "file": file})

    sources = []
    for context in relevant_contexts:
        try:
            if "documents" in context:
                if "metadatas" in context:
                    source = {
                        "source": context["file"],
                        "document": context["documents"][0],
                        "metadata": context["metadatas"][0],
                    }
                    if "distances" in context and context["distances"]:
                        source["distances"] = context["distances"][0]

                    sources.append(source)
        except Exception as e:
            log.exception(e)

    return sources

def get_model_path(model: str, update_model: bool = False):
    # Construct huggingface_hub kwargs with local_files_only to return the snapshot path
    cache_dir = os.getenv("SENTENCE_TRANSFORMERS_HOME")

    local_files_only = not update_model
    if OFFLINE_MODE:
        local_files_only = True

    snapshot_kwargs = {
        "cache_dir": cache_dir,
        "local_files_only": local_files_only,
    }

    log.debug(f"model: {model}")
    log.debug(f"snapshot_kwargs: {snapshot_kwargs}")

    # Inspiration from upstream sentence_transformers
    if os.path.exists(model) or (
        ("\\" in model or model.count("/") > 1) and local_files_only
    ):
        # If the fully qualified path exists, return the input; else set repo_id
        return model
    elif "/" not in model:
        # Set a valid repo_id for a model short-name
        model = "sentence-transformers/" + model

    snapshot_kwargs["repo_id"] = model

    # Attempt to query the huggingface_hub library to determine the local path and/or to update
    try:
        model_repo_path = snapshot_download(**snapshot_kwargs)
        log.debug(f"model_repo_path: {model_repo_path}")
        return model_repo_path
    except Exception as e:
        log.exception(f"Cannot determine model snapshot path: {e}")
        return model
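
# A minimal sketch (the model name is illustrative): a short name is expanded
# into the "sentence-transformers/" namespace and resolved to a local snapshot
# path, downloading first unless OFFLINE_MODE or local_files_only applies.
#
#     path = get_model_path("all-MiniLM-L6-v2")
#     # e.g. a cached snapshot directory under SENTENCE_TRANSFORMERS_HOME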

def generate_openai_batch_embeddings(
    model: str,
    texts: list[str],
    url: str = "https://api.openai.com/v1",
    key: str = "",
    prefix: Optional[str] = None,
    user: UserModel = None,
) -> Optional[list[list[float]]]:
    try:
        json_data = {"input": texts, "model": model}
        if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str):
            json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix

        r = requests.post(
            f"{url}/embeddings",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {key}",
                **(
                    {
                        "X-OpenWebUI-User-Name": user.name,
                        "X-OpenWebUI-User-Id": user.id,
                        "X-OpenWebUI-User-Email": user.email,
                        "X-OpenWebUI-User-Role": user.role,
                    }
                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
                    else {}
                ),
            },
            json=json_data,
        )
        r.raise_for_status()
        data = r.json()
        if "data" in data:
            return [elem["embedding"] for elem in data["data"]]
        else:
            # Raising a bare string is invalid in Python 3; raise a real exception
            raise ValueError("Unexpected response from the embeddings endpoint")
    except Exception as e:
        log.exception(f"Error generating openai batch embeddings: {e}")
        return None

def generate_ollama_batch_embeddings(
    model: str,
    texts: list[str],
    url: str,
    key: str = "",
    prefix: Optional[str] = None,
    user: UserModel = None,
) -> Optional[list[list[float]]]:
    try:
        json_data = {"input": texts, "model": model}
        if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str):
            json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix

        r = requests.post(
            f"{url}/api/embed",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {key}",
                **(
                    {
                        "X-OpenWebUI-User-Name": user.name,
                        "X-OpenWebUI-User-Id": user.id,
                        "X-OpenWebUI-User-Email": user.email,
                        "X-OpenWebUI-User-Role": user.role,
                    }
                    # Also require a user, as the openai variant does, so a
                    # None user cannot raise AttributeError here
                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
                    else {}
                ),
            },
            json=json_data,
        )
        r.raise_for_status()
        data = r.json()
        if "embeddings" in data:
            return data["embeddings"]
        else:
            # Raising a bare string is invalid in Python 3; raise a real exception
            raise ValueError("Unexpected response from the Ollama embed endpoint")
    except Exception as e:
        log.exception(f"Error generating ollama batch embeddings: {e}")
        return None

def generate_embeddings(
    engine: str,
    model: str,
    text: Union[str, list[str]],
    prefix: Union[str, None] = None,
    **kwargs,
):
    url = kwargs.get("url", "")
    key = kwargs.get("key", "")
    user = kwargs.get("user")

    # If the backend has no dedicated prefix field, prepend the prefix to the text
    if prefix is not None and RAG_EMBEDDING_PREFIX_FIELD_NAME is None:
        if isinstance(text, list):
            text = [f"{prefix}{text_element}" for text_element in text]
        else:
            text = f"{prefix}{text}"

    if engine == "ollama":
        embeddings = generate_ollama_batch_embeddings(
            model=model,
            texts=text if isinstance(text, list) else [text],
            url=url,
            key=key,
            prefix=prefix,
            user=user,
        )
        # Guard against a failed backend call (None) before unwrapping single results
        return embeddings[0] if isinstance(text, str) and embeddings else embeddings
    elif engine == "openai":
        embeddings = generate_openai_batch_embeddings(
            model, text if isinstance(text, list) else [text], url, key, prefix, user
        )
        return embeddings[0] if isinstance(text, str) and embeddings else embeddings
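
# A minimal sketch (URL and model are hypothetical): single strings are
# wrapped into a one-element batch and unwrapped on return, so both call
# shapes yield the natural result type.
#
#     vec = generate_embeddings(
#         engine="ollama",
#         model="nomic-embed-text",
#         text="hello world",
#         url="http://localhost:11434",
#     )
#     vecs = generate_embeddings(
#         engine="ollama",
#         model="nomic-embed-text",
#         text=["a", "b"],
#         url="http://localhost:11434",
#     )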

import operator
from typing import Sequence

from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document


class RerankCompressor(BaseDocumentCompressor):
    embedding_function: Any
    top_n: int
    reranking_function: Any
    r_score: float

    class Config:
        extra = "forbid"
        arbitrary_types_allowed = True

    def compress_documents(
        self,
        documents: Sequence[Document],
        query: str,
        callbacks: Optional[Callbacks] = None,
    ) -> Sequence[Document]:
        reranking = self.reranking_function is not None

        if reranking:
            scores = self.reranking_function.predict(
                [(query, doc.page_content) for doc in documents]
            )
        else:
            # Fall back to cosine similarity between query and passage embeddings
            from sentence_transformers import util

            query_embedding = self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)
            document_embedding = self.embedding_function(
                [doc.page_content for doc in documents], RAG_EMBEDDING_PASSAGE_PREFIX
            )
            scores = util.cos_sim(query_embedding, document_embedding)[0]

        docs_with_scores = list(zip(documents, scores.tolist()))
        if self.r_score:
            docs_with_scores = [
                (d, s) for d, s in docs_with_scores if s >= self.r_score
            ]

        result = sorted(docs_with_scores, key=operator.itemgetter(1), reverse=True)
        final_results = []
        for doc, doc_score in result[: self.top_n]:
            metadata = doc.metadata
            metadata["score"] = doc_score
            doc = Document(
                page_content=doc.page_content,
                metadata=metadata,
            )
            final_results.append(doc)
        return final_results
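
# A minimal usage sketch (the CrossEncoder model below is illustrative):
# scores under r_score are dropped, the rest are sorted descending, and each
# surviving Document gains a "score" metadata entry that the hybrid-search
# path reads back as a distance.
#
#     from sentence_transformers import CrossEncoder
#
#     compressor = RerankCompressor(
#         embedding_function=embedding_function,
#         top_n=3,
#         reranking_function=CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2"),
#         r_score=0.0,
#     )
#     top_docs = compressor.compress_documents(docs, "query text")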