gpt4all.py

import os
from pathlib import Path
from typing import Iterable, Optional, Union

from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm


@register_deserializable
class GPT4ALLLlm(BaseLlm):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        # Fall back to the default quantized Orca Mini model if none is configured.
        if self.config.model is None:
            self.config.model = "orca-mini-3b-gguf2-q4_0.gguf"
        self.instance = GPT4ALLLlm._get_instance(self.config.model)
        self.instance.streaming = self.config.stream

    def get_llm_model_answer(self, prompt):
        return self._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_instance(model):
        try:
            from langchain.llms.gpt4all import GPT4All as LangchainGPT4All
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The GPT4All python package is not installed. Please install it with `pip install --upgrade embedchain[opensource]`"  # noqa: E501
            ) from None

        # An absolute path must point at an existing local model file;
        # anything else is treated as a model name that GPT4All may download.
        model_path = Path(model).expanduser()
        if os.path.isabs(model_path):
            if os.path.exists(model_path):
                return LangchainGPT4All(model=str(model_path))
            else:
                raise ValueError(f"Model does not exist at {model_path=}")
        else:
            return LangchainGPT4All(model=model, allow_download=True)

    def _get_answer(self, prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
        if config.model and config.model != self.config.model:
            raise RuntimeError(
                "GPT4ALLLlm does not support switching models at runtime. Please create a new app instance."
            )

        messages = []
        if config.system_prompt:
            messages.append(config.system_prompt)
        messages.append(prompt)
        kwargs = {
            "temp": config.temperature,
            "max_tokens": config.max_tokens,
        }
        if config.top_p:
            kwargs["top_p"] = config.top_p
        # Stream tokens to stdout as they are generated, or print the final answer once.
        callbacks = [StreamingStdOutCallbackHandler()] if config.stream else [StdOutCallbackHandler()]
        response = self.instance.generate(prompts=messages, callbacks=callbacks, **kwargs)
        # Concatenate the text of every generation into a single answer string.
        answer = ""
        for generations in response.generations:
            answer += " ".join(map(lambda generation: generation.text, generations))
        return answer
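

# ---------------------------------------------------------------------------
# A minimal usage sketch, not part of the original module. It assumes
# BaseLlmConfig accepts the fields this class reads (model, temperature,
# max_tokens, top_p, stream, system_prompt); the parameter values and the
# prompt below are hypothetical. With stream=True, tokens are echoed to
# stdout as they arrive, and the concatenated answer is returned at the end.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = BaseLlmConfig(
        model="orca-mini-3b-gguf2-q4_0.gguf",  # default model; downloaded if no local path is given
        temperature=0.2,
        max_tokens=256,
        stream=True,
    )
    llm = GPT4ALLLlm(config=config)
    answer = llm.get_llm_model_answer("Summarize what embedchain does.")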