base.py

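"""Prompt templates and configuration classes for embedchain's evaluation
metrics: answer relevancy, context relevancy, and groundedness."""
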
from typing import Optional

from embedchain.config.base_config import BaseConfig

ANSWER_RELEVANCY_PROMPT = """
Please provide $num_gen_questions questions from the provided answer.
You must provide the complete question; if you are not able to provide the complete question, return an empty string ("").
Please only provide one question per line without numbers or bullets to distinguish them.
You must only provide the questions and no other text.

$answer
"""  # noqa:E501

CONTEXT_RELEVANCY_PROMPT = """
Please extract relevant sentences from the provided context that are required to answer the given question.
If no relevant sentences are found, or if you believe the question cannot be answered from the given context, return an empty string ("").
While extracting candidate sentences, you're not allowed to make any changes to sentences from the given context or make up any sentences.
You must only provide sentences from the given context and nothing else.

Context: $context
Question: $question
"""  # noqa:E501

GROUNDEDNESS_ANSWER_CLAIMS_PROMPT = """
Please provide one or more statements from each sentence of the provided answer.
You must provide the semantically equivalent statements for each sentence of the answer.
You must provide the complete statement; if you are not able to provide the complete statement, return an empty string ("").
Please only provide one statement per line WITHOUT numbers or bullets.
If the question provided is not being answered in the provided answer, return an empty string ("").
You must only provide the statements and no other text.

$question
$answer
"""  # noqa:E501

GROUNDEDNESS_CLAIMS_INFERENCE_PROMPT = """
Given the context and the provided claim statements, please provide a verdict for each claim statement: whether it can be completely inferred from the given context or not.
Use only "1", "0", or "-1" for "yes", "no", or "null" respectively.
You must provide one verdict per line, ONLY WITH "1", "0", or "-1" as per your verdict for the given statement, and nothing else.
You must provide the verdicts in the same order as the claim statements.

Contexts:
$context

Claim statements:
$claim_statements
"""  # noqa:E501


class GroundednessConfig(BaseConfig):
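    """Configuration for the groundedness evaluation metric.

    Holds the LLM model name, an optional API key, and the prompts used to
    extract claim statements from an answer and to check each claim against
    the retrieved context.
    """
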
    def __init__(
        self,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        answer_claims_prompt: str = GROUNDEDNESS_ANSWER_CLAIMS_PROMPT,
        claims_inference_prompt: str = GROUNDEDNESS_CLAIMS_INFERENCE_PROMPT,
    ):
        self.model = model
        self.api_key = api_key
        self.answer_claims_prompt = answer_claims_prompt
        self.claims_inference_prompt = claims_inference_prompt


class AnswerRelevanceConfig(BaseConfig):
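    """Configuration for the answer relevancy evaluation metric.

    Holds the LLM model and embedder names, an optional API key, the number
    of questions to generate from the answer, and the prompt used to
    generate them.
    """
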
    def __init__(
        self,
        model: str = "gpt-4",
        embedder: str = "text-embedding-ada-002",
        api_key: Optional[str] = None,
        num_gen_questions: int = 1,
        prompt: str = ANSWER_RELEVANCY_PROMPT,
    ):
        self.model = model
        self.embedder = embedder
        self.api_key = api_key
        self.num_gen_questions = num_gen_questions
        self.prompt = prompt


class ContextRelevanceConfig(BaseConfig):
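    """Configuration for the context relevancy evaluation metric.

    Holds the LLM model name, an optional API key, the evaluation language,
    and the prompt used to extract relevant sentences from the retrieved
    context.
    """
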
    def __init__(
        self,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        language: str = "en",
        prompt: str = CONTEXT_RELEVANCY_PROMPT,
    ):
        self.model = model
        self.api_key = api_key
        self.language = language
        self.prompt = prompt
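

# A minimal usage sketch, assuming these configs are handed to embedchain's
# evaluation pipeline; the argument values below are placeholders.
#
#   groundedness_config = GroundednessConfig(model="gpt-4", api_key="sk-...")
#   answer_relevance_config = AnswerRelevanceConfig(num_gen_questions=3)
#   context_relevance_config = ContextRelevanceConfig(language="en")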