Parcourir la source

feat: pass QueryConfig to dry_run (#173)

gasolin il y a 2 ans
Parent
commit
5e6aef1e37
1 fichier modifié avec 2 ajouts et 2 suppressions
  1. 2 2
      embedchain/embedchain.py

+ 2 - 2
embedchain/embedchain.py

@@ -279,7 +279,7 @@ class EmbedChain:
         memory.chat_memory.add_ai_message(answer)
         return answer
 
 
-    def dry_run(self, input_query):
+    def dry_run(self, input_query, config: QueryConfig = None):
         """
         A dry run does everything except send the resulting prompt to
         the LLM. The purpose is to test the prompt, not the response.
@@ -387,4 +387,4 @@ class OpenSourceApp(EmbedChain):
         response = gpt4all_model.generate(
             prompt=prompt,
         )
-        return response
+        return response