elasticsearch_db.py

from typing import Any, Callable, Dict, List, Set

try:
    from elasticsearch import Elasticsearch
    from elasticsearch.helpers import bulk
except ImportError:
    raise ImportError(
        "Elasticsearch requires extra dependencies. Install with `pip install embedchain[elasticsearch]`"
    ) from None

from embedchain.config import ElasticsearchDBConfig
from embedchain.helper_classes.json_serializable import register_deserializable
from embedchain.models.VectorDimensions import VectorDimensions
from embedchain.vectordb.base_vector_db import BaseVectorDB


@register_deserializable
class ElasticsearchDB(BaseVectorDB):
    def __init__(
        self,
        es_config: ElasticsearchDBConfig = None,
        embedding_fn: Callable[[List[str]], List[str]] = None,
        vector_dim: VectorDimensions = None,
        collection_name: str = None,
    ):
        """
        Elasticsearch as vector database

        :param es_config: Elasticsearch database config to be used for the connection
        :param embedding_fn: Function to generate embedding vectors.
        :param vector_dim: Vector dimension generated by the embedding fn
        :param collection_name: Collection name for the database.
        """
        if not hasattr(embedding_fn, "__call__"):
            raise ValueError("Embedding function is not a function")
        if es_config is None:
            raise ValueError("ElasticsearchDBConfig is required")
        if vector_dim is None:
            raise ValueError("Vector Dimension is required to refer to the correct index and mapping")
        if collection_name is None:
            raise ValueError("collection name is required. It cannot be empty")
        self.embedding_fn = embedding_fn
        self.client = Elasticsearch(es_config.ES_URL, **es_config.ES_EXTRA_PARAMS)
        self.vector_dim = vector_dim
        # One index per (collection, vector dimension) pair, e.g. "my_collection_1536".
        self.es_index = f"{collection_name}_{self.vector_dim}"
        index_settings = {
            "mappings": {
                "properties": {
                    "text": {"type": "text"},
                    "embeddings": {"type": "dense_vector", "index": False, "dims": self.vector_dim},
                }
            }
        }
        if not self.client.indices.exists(index=self.es_index):
            # create the index if it does not exist
            print("Creating index", self.es_index, index_settings)
            self.client.indices.create(index=self.es_index, body=index_settings)
        super().__init__()

    def _get_or_create_db(self):
        return self.client

    def _get_or_create_collection(self, name):
        """Note: nothing to create here; the index is created in __init__."""

    def get(self, ids: List[str], where: Dict[str, Any]) -> Set[str]:
        """
        Get existing doc ids present in the vector database

        :param ids: list of doc ids to check for existence
        :param where: Optional. to filter data
        """
        query = {"bool": {"must": [{"ids": {"values": ids}}]}}
        if "app_id" in where:
            app_id = where["app_id"]
            query["bool"]["must"].append({"term": {"metadata.app_id": app_id}})
        response = self.client.search(index=self.es_index, query=query, _source=False)
        docs = response["hits"]["hits"]
        ids = [doc["_id"] for doc in docs]
        return set(ids)

    def add(self, documents: List[str], metadatas: List[object], ids: List[str]) -> Any:
        """
        Add data to the vector database

        :param documents: list of texts to add
        :param metadatas: list of metadata associated with docs
        :param ids: ids of docs
        """
        docs = []
        embeddings = self.embedding_fn(documents)
        for id, text, metadata, embedding in zip(ids, documents, metadatas, embeddings):
            docs.append(
                {
                    "_index": self.es_index,
                    "_id": id,
                    "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
                }
            )
        bulk(self.client, docs)
        self.client.indices.refresh(index=self.es_index)
        return

    def query(self, input_query: List[str], n_results: int, where: Dict[str, Any]) -> List[str]:
        """
        Query contents from the vector database based on vector similarity

        :param input_query: list of query strings
        :param n_results: number of similar documents to fetch from the database
        :param where: Optional. to filter data
        """
        input_query_vector = self.embedding_fn(input_query)
        query_vector = input_query_vector[0]
        query = {
            "script_score": {
                "query": {"bool": {"must": [{"exists": {"field": "text"}}]}},
                "script": {
                    # cosineSimilarity returns values in [-1, 1]; add 1.0 so scores stay non-negative.
                    "source": "cosineSimilarity(params.input_query_vector, 'embeddings') + 1.0",
                    "params": {"input_query_vector": query_vector},
                },
            }
        }
        if "app_id" in where:
            app_id = where["app_id"]
            query["script_score"]["query"]["bool"]["must"] = [{"term": {"metadata.app_id": app_id}}]
        _source = ["text"]
        response = self.client.search(index=self.es_index, query=query, _source=_source, size=n_results)
        docs = response["hits"]["hits"]
        contents = [doc["_source"]["text"] for doc in docs]
        return contents

    def count(self) -> int:
        """Return the total number of documents in the index."""
        query = {"match_all": {}}
        response = self.client.count(index=self.es_index, query=query)
        doc_count = response["count"]
        return doc_count

    def reset(self):
        # Delete all data from the database
        if self.client.indices.exists(index=self.es_index):
            # delete the index in ES
            self.client.indices.delete(index=self.es_index)
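

# --- Usage sketch (illustrative only; not part of the original module) ---
# A minimal, hedged example of how this class might be wired up. It assumes a
# local Elasticsearch node at http://localhost:9200 and that your embedchain
# version's ElasticsearchDBConfig accepts the node URL as `es_url`; the
# `fake_embedding_fn` and the plain-int vector dimension below are stand-ins
# for a real embedding function and a VectorDimensions value.
if __name__ == "__main__":
    def fake_embedding_fn(texts: List[str]) -> List[List[float]]:
        # Placeholder: fixed-size non-zero vectors instead of real embeddings,
        # so cosineSimilarity in the query script stays well defined.
        return [[0.1] * 1536 for _ in texts]

    db = ElasticsearchDB(
        es_config=ElasticsearchDBConfig(es_url="http://localhost:9200"),
        embedding_fn=fake_embedding_fn,
        vector_dim=1536,
        collection_name="demo_collection",
    )
    db.add(documents=["hello world"], metadatas=[{"app_id": "demo"}], ids=["doc-1"])
    print(db.query(input_query=["hello"], n_results=1, where={"app_id": "demo"}))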