base.py

import logging
import re
from string import Template
from typing import Any, Mapping, Optional

from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import register_deserializable

logger = logging.getLogger(__name__)

DEFAULT_PROMPT = """
You are a Q&A expert system. Your responses must always be rooted in the context provided for each query. Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response.
2. The context should silently guide your answers without being directly acknowledged.
3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Context information:
----------------------
$context
----------------------
Query: $query
Answer:
"""  # noqa:E501

DEFAULT_PROMPT_WITH_HISTORY = """
You are a Q&A expert system. Your responses must always be rooted in the context provided for each query. You are also provided with the conversation history with the user. Make sure to use relevant context from the conversation history as needed.
Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response.
2. The context should silently guide your answers without being directly acknowledged.
3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Context information:
----------------------
$context
----------------------
Conversation history:
----------------------
$history
----------------------
Query: $query
Answer:
"""  # noqa:E501

DOCS_SITE_DEFAULT_PROMPT = """
You are an expert AI assistant for a developer support product. Your responses must always be rooted in the context provided for each query. Wherever possible, give a complete code snippet. Don't make up any code snippets on your own.
Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response.
2. The context should silently guide your answers without being directly acknowledged.
3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Context information:
----------------------
$context
----------------------
Query: $query
Answer:
"""  # noqa:E501

DEFAULT_PROMPT_TEMPLATE = Template(DEFAULT_PROMPT)
DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE = Template(DEFAULT_PROMPT_WITH_HISTORY)
DOCS_SITE_PROMPT_TEMPLATE = Template(DOCS_SITE_DEFAULT_PROMPT)

query_re = re.compile(r"\$\{*query\}*")
context_re = re.compile(r"\$\{*context\}*")
history_re = re.compile(r"\$\{*history\}*")
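# The patterns above accept both `string.Template` placeholder spellings,
# so "$query" and "${query}" (and likewise for context/history) both pass
# validation. Illustrative examples, not part of the original source:
#
#     query_re.search("Query: $query")      # -> Match
#     query_re.search("Query: ${query}")    # -> Match
#     context_re.search("no placeholder")   # -> None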


@register_deserializable
class BaseLlmConfig(BaseConfig):
    """
    Config for the `query` method.
    """

    def __init__(
        self,
        number_documents: int = 3,
        template: Optional[Template] = None,
        prompt: Optional[Template] = None,
        model: Optional[str] = None,
        temperature: float = 0,
        max_tokens: int = 1000,
        top_p: float = 1,
        stream: bool = False,
        deployment_name: Optional[str] = None,
        system_prompt: Optional[str] = None,
        where: Optional[dict[str, Any]] = None,
        query_type: Optional[str] = None,
        callbacks: Optional[list] = None,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        endpoint: Optional[str] = None,
        model_kwargs: Optional[dict[str, Any]] = None,
        http_client: Optional[Any] = None,
        http_async_client: Optional[Any] = None,
        local: Optional[bool] = False,
        default_headers: Optional[Mapping[str, str]] = None,
    ):
  85. """
  86. Initializes a configuration class instance for the LLM.
  87. Takes the place of the former `QueryConfig` or `ChatConfig`.
  88. :param number_documents: Number of documents to pull from the database as
  89. context, defaults to 1
  90. :type number_documents: int, optional
  91. :param template: The `Template` instance to use as a template for
  92. prompt, defaults to None (deprecated)
  93. :type template: Optional[Template], optional
  94. :param prompt: The `Template` instance to use as a template for
  95. prompt, defaults to None
  96. :type prompt: Optional[Template], optional
  97. :param model: Controls the OpenAI model used, defaults to None
  98. :type model: Optional[str], optional
  99. :param temperature: Controls the randomness of the model's output.
  100. Higher values (closer to 1) make output more random, lower values make it more deterministic, defaults to 0
  101. :type temperature: float, optional
  102. :param max_tokens: Controls how many tokens are generated, defaults to 1000
  103. :type max_tokens: int, optional
  104. :param top_p: Controls the diversity of words. Higher values (closer to 1) make word selection more diverse,
  105. defaults to 1
  106. :type top_p: float, optional
  107. :param stream: Control if response is streamed back to user, defaults to False
  108. :type stream: bool, optional
  109. :param deployment_name: t.b.a., defaults to None
  110. :type deployment_name: Optional[str], optional
  111. :param system_prompt: System prompt string, defaults to None
  112. :type system_prompt: Optional[str], optional
  113. :param where: A dictionary of key-value pairs to filter the database results., defaults to None
  114. :type where: dict[str, Any], optional
  115. :param api_key: The api key of the custom endpoint, defaults to None
  116. :type api_key: Optional[str], optional
  117. :param endpoint: The api url of the custom endpoint, defaults to None
  118. :type endpoint: Optional[str], optional
  119. :param model_kwargs: A dictionary of key-value pairs to pass to the model, defaults to None
  120. :type model_kwargs: Optional[Dict[str, Any]], optional
  121. :param callbacks: Langchain callback functions to use, defaults to None
  122. :type callbacks: Optional[list], optional
  123. :param query_type: The type of query to use, defaults to None
  124. :type query_type: Optional[str], optional
  125. :param local: If True, the model will be run locally, defaults to False (for huggingface provider)
  126. :type local: Optional[bool], optional
  127. :param default_headers: Set additional HTTP headers to be sent with requests to OpenAI
  128. :type default_headers: Optional[Mapping[str, str]], optional
  129. :raises ValueError: If the template is not valid as template should
  130. contain $context and $query (and optionally $history)
  131. :raises ValueError: Stream is not boolean
  132. """
        if template is not None:
            logger.warning(
                "The `template` argument is deprecated and will be removed in a future version. "
                "Please use `prompt` instead."
            )
            # Fall back to the deprecated argument only if `prompt` was not given.
            if prompt is None:
                prompt = template

        if prompt is None:
            prompt = DEFAULT_PROMPT_TEMPLATE

        self.number_documents = number_documents
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.model = model
        self.top_p = top_p
        self.deployment_name = deployment_name
        self.system_prompt = system_prompt
        self.query_type = query_type
        self.callbacks = callbacks
        self.api_key = api_key
        self.base_url = base_url
        self.endpoint = endpoint
        self.model_kwargs = model_kwargs
        self.http_client = http_client
        self.http_async_client = http_async_client
        self.local = local
        self.default_headers = default_headers

        if isinstance(prompt, str):
            prompt = Template(prompt)

        if self.validate_prompt(prompt):
            self.prompt = prompt
        else:
            raise ValueError("The 'prompt' should have 'query' and 'context' keys and potentially 'history' (if used).")

        if not isinstance(stream, bool):
            raise ValueError("`stream` should be bool")
        self.stream = stream
        self.where = where

    @staticmethod
    def validate_prompt(prompt: Template) -> Optional[re.Match[str]]:
        """
        Validate that the prompt contains both the $query and $context placeholders.

        :param prompt: the prompt to validate
        :type prompt: Template
        :return: a truthy `re.Match` if the prompt is valid, otherwise None
        :rtype: Optional[re.Match[str]]
        """
        return re.search(query_re, prompt.template) and re.search(context_re, prompt.template)
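    # Illustrative behaviour (examples added for clarity, not part of the
    # original source):
    #
    #     BaseLlmConfig.validate_prompt(Template("$context $query"))  # -> Match (truthy)
    #     BaseLlmConfig.validate_prompt(Template("$query only"))      # -> None (falsy)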

    @staticmethod
    def _validate_prompt_history(prompt: Template) -> Optional[re.Match[str]]:
        """
        Validate that the prompt contains the $history placeholder.

        :param prompt: the prompt to validate
        :type prompt: Template
        :return: a truthy `re.Match` if the prompt contains $history, otherwise None
        :rtype: Optional[re.Match[str]]
        """
        return re.search(history_re, prompt.template)
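

# A minimal usage sketch (added for illustration, not part of the original
# module), assuming this file is importable as `embedchain.config.llm.base`:
#
#     from string import Template
#     from embedchain.config.llm.base import BaseLlmConfig
#
#     # A custom prompt must contain $context and $query ($history is optional).
#     config = BaseLlmConfig(
#         number_documents=5,
#         temperature=0.2,
#         prompt=Template("Context: $context\nQuestion: $query\nAnswer:"),
#     )
#
#     # A prompt missing either placeholder raises ValueError at construction:
#     BaseLlmConfig(prompt=Template("Question: $query"))  # raises ValueError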