# opensearch.py — OpenSearch vector database integration for embedchain.
  1. import logging
  2. import time
  3. from typing import Any, Dict, List, Optional, Set, Tuple, Union
  4. from tqdm import tqdm
  5. try:
  6. from opensearchpy import OpenSearch
  7. from opensearchpy.helpers import bulk
  8. except ImportError:
  9. raise ImportError(
  10. "OpenSearch requires extra dependencies. Install with `pip install --upgrade embedchain[opensearch]`"
  11. ) from None
  12. from langchain.embeddings.openai import OpenAIEmbeddings
  13. from langchain.vectorstores import OpenSearchVectorSearch
  14. from embedchain.config import OpenSearchDBConfig
  15. from embedchain.helpers.json_serializable import register_deserializable
  16. from embedchain.vectordb.base import BaseVectorDB
  17. @register_deserializable
  18. class OpenSearchDB(BaseVectorDB):
  19. """
  20. OpenSearch as vector database
  21. """
  22. BATCH_SIZE = 100
  23. def __init__(self, config: OpenSearchDBConfig):
  24. """OpenSearch as vector database.
  25. :param config: OpenSearch domain config
  26. :type config: OpenSearchDBConfig
  27. """
  28. if config is None:
  29. raise ValueError("OpenSearchDBConfig is required")
  30. self.config = config
  31. self.client = OpenSearch(
  32. hosts=[self.config.opensearch_url],
  33. http_auth=self.config.http_auth,
  34. **self.config.extra_params,
  35. )
  36. info = self.client.info()
  37. logging.info(f"Connected to {info['version']['distribution']}. Version: {info['version']['number']}")
  38. # Remove auth credentials from config after successful connection
  39. super().__init__(config=self.config)
  40. def _initialize(self):
  41. logging.info(self.client.info())
  42. index_name = self._get_index()
  43. if self.client.indices.exists(index=index_name):
  44. print(f"Index '{index_name}' already exists.")
  45. return
  46. index_body = {
  47. "settings": {"knn": True},
  48. "mappings": {
  49. "properties": {
  50. "text": {"type": "text"},
  51. "embeddings": {
  52. "type": "knn_vector",
  53. "index": False,
  54. "dimension": self.config.vector_dimension,
  55. },
  56. }
  57. },
  58. }
  59. self.client.indices.create(index_name, body=index_body)
  60. print(self.client.indices.get(index_name))
  61. def _get_or_create_db(self):
  62. """Called during initialization"""
  63. return self.client
  64. def _get_or_create_collection(self, name):
  65. """Note: nothing to return here. Discuss later"""
  66. def get(
  67. self, ids: Optional[List[str]] = None, where: Optional[Dict[str, any]] = None, limit: Optional[int] = None
  68. ) -> Set[str]:
  69. """
  70. Get existing doc ids present in vector database
  71. :param ids: _list of doc ids to check for existence
  72. :type ids: List[str]
  73. :param where: to filter data
  74. :type where: Dict[str, any]
  75. :return: ids
  76. :type: Set[str]
  77. """
  78. query = {}
  79. if ids:
  80. query["query"] = {"bool": {"must": [{"ids": {"values": ids}}]}}
  81. else:
  82. query["query"] = {"bool": {"must": []}}
  83. if "app_id" in where:
  84. app_id = where["app_id"]
  85. query["query"]["bool"]["must"].append({"term": {"metadata.app_id.keyword": app_id}})
  86. # OpenSearch syntax is different from Elasticsearch
  87. response = self.client.search(index=self._get_index(), body=query, _source=True, size=limit)
  88. docs = response["hits"]["hits"]
  89. ids = [doc["_id"] for doc in docs]
  90. doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
  91. # Result is modified for compatibility with other vector databases
  92. # TODO: Add method in vector database to return result in a standard format
  93. result = {"ids": ids, "metadatas": []}
  94. for doc_id in doc_ids:
  95. result["metadatas"].append({"doc_id": doc_id})
  96. return result
  97. def add(
  98. self,
  99. embeddings: List[List[str]],
  100. documents: List[str],
  101. metadatas: List[object],
  102. ids: List[str],
  103. skip_embedding: bool,
  104. **kwargs: Optional[Dict[str, any]],
  105. ):
  106. """Add data in vector database.
  107. Args:
  108. embeddings (List[List[str]]): List of embeddings to add.
  109. documents (List[str]): List of texts to add.
  110. metadatas (List[object]): List of metadata associated with docs.
  111. ids (List[str]): IDs of docs.
  112. skip_embedding (bool): If True, then embeddings are assumed to be already generated.
  113. """
  114. for batch_start in tqdm(range(0, len(documents), self.BATCH_SIZE), desc="Inserting batches in opensearch"):
  115. batch_end = batch_start + self.BATCH_SIZE
  116. batch_documents = documents[batch_start:batch_end]
  117. # Generate embeddings for the batch if not skipping embedding
  118. if not skip_embedding:
  119. batch_embeddings = self.embedder.embedding_fn(batch_documents)
  120. else:
  121. batch_embeddings = embeddings[batch_start:batch_end]
  122. # Create document entries for bulk upload
  123. batch_entries = [
  124. {
  125. "_index": self._get_index(),
  126. "_id": doc_id,
  127. "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
  128. }
  129. for doc_id, text, metadata, embedding in zip(
  130. ids[batch_start:batch_end], batch_documents, metadatas[batch_start:batch_end], batch_embeddings
  131. )
  132. ]
  133. # Perform bulk operation
  134. bulk(self.client, batch_entries, **kwargs)
  135. self.client.indices.refresh(index=self._get_index())
  136. # Sleep to avoid rate limiting
  137. time.sleep(0.1)
  138. def query(
  139. self,
  140. input_query: List[str],
  141. n_results: int,
  142. where: Dict[str, any],
  143. skip_embedding: bool,
  144. citations: bool = False,
  145. **kwargs: Optional[Dict[str, Any]],
  146. ) -> Union[List[Tuple[str, Dict]], List[str]]:
  147. """
  148. query contents from vector data base based on vector similarity
  149. :param input_query: list of query string
  150. :type input_query: List[str]
  151. :param n_results: no of similar documents to fetch from database
  152. :type n_results: int
  153. :param where: Optional. to filter data
  154. :type where: Dict[str, any]
  155. :param skip_embedding: Optional. If True, then the input_query is assumed to be already embedded.
  156. :type skip_embedding: bool
  157. :param citations: we use citations boolean param to return context along with the answer.
  158. :type citations: bool, default is False.
  159. :return: The content of the document that matched your query,
  160. along with url of the source and doc_id (if citations flag is true)
  161. :rtype: List[str], if citations=False, otherwise List[Tuple[str, str, str]]
  162. """
  163. # TODO(rupeshbansal, deshraj): Add support for skip embeddings here if already exists
  164. embeddings = OpenAIEmbeddings()
  165. docsearch = OpenSearchVectorSearch(
  166. index_name=self._get_index(),
  167. embedding_function=embeddings,
  168. opensearch_url=f"{self.config.opensearch_url}",
  169. http_auth=self.config.http_auth,
  170. use_ssl=hasattr(self.config, "use_ssl") and self.config.use_ssl,
  171. verify_certs=hasattr(self.config, "verify_certs") and self.config.verify_certs,
  172. )
  173. pre_filter = {"match_all": {}} # default
  174. if "app_id" in where:
  175. app_id = where["app_id"]
  176. pre_filter = {"bool": {"must": [{"term": {"metadata.app_id.keyword": app_id}}]}}
  177. docs = docsearch.similarity_search_with_score(
  178. input_query,
  179. search_type="script_scoring",
  180. space_type="cosinesimil",
  181. vector_field="embeddings",
  182. text_field="text",
  183. metadata_field="metadata",
  184. pre_filter=pre_filter,
  185. k=n_results,
  186. **kwargs,
  187. )
  188. contexts = []
  189. for doc, score in docs:
  190. context = doc.page_content
  191. if citations:
  192. metadata = doc.metadata
  193. metadata["score"] = score
  194. contexts.append(tuple((context, metadata)))
  195. else:
  196. contexts.append(context)
  197. return contexts
  198. def set_collection_name(self, name: str):
  199. """
  200. Set the name of the collection. A collection is an isolated space for vectors.
  201. :param name: Name of the collection.
  202. :type name: str
  203. """
  204. if not isinstance(name, str):
  205. raise TypeError("Collection name must be a string")
  206. self.config.collection_name = name
  207. def count(self) -> int:
  208. """
  209. Count number of documents/chunks embedded in the database.
  210. :return: number of documents
  211. :rtype: int
  212. """
  213. query = {"query": {"match_all": {}}}
  214. response = self.client.count(index=self._get_index(), body=query)
  215. doc_count = response["count"]
  216. return doc_count
  217. def reset(self):
  218. """
  219. Resets the database. Deletes all embeddings irreversibly.
  220. """
  221. # Delete all data from the database
  222. if self.client.indices.exists(index=self._get_index()):
  223. # delete index in ES
  224. self.client.indices.delete(index=self._get_index())
  225. def delete(self, where):
  226. """Deletes a document from the OpenSearch index"""
  227. if "doc_id" not in where:
  228. raise ValueError("doc_id is required to delete a document")
  229. query = {"query": {"bool": {"must": [{"term": {"metadata.doc_id": where["doc_id"]}}]}}}
  230. self.client.delete_by_query(index=self._get_index(), body=query)
  231. def _get_index(self) -> str:
  232. """Get the OpenSearch index for a collection
  233. :return: OpenSearch index
  234. :rtype: str
  235. """
  236. return self.config.collection_name