ollama.py

from typing import Iterable, Optional, Union

from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms.ollama import Ollama

from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm


@register_deserializable
class OllamaLlm(BaseLlm):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        # Default to llama2 when the config does not name a model.
        if self.config.model is None:
            self.config.model = "llama2"

    def get_llm_model_answer(self, prompt):
        return self._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
        # Stream tokens to stdout as they arrive, or print the full answer at once.
        callback_manager = [StreamingStdOutCallbackHandler()] if config.stream else [StdOutCallbackHandler()]
        llm = Ollama(
            model=config.model,
            system=config.system_prompt,
            temperature=config.temperature,
            top_p=config.top_p,
            callback_manager=CallbackManager(callback_manager),
        )
        return llm(prompt)
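

if __name__ == "__main__":
    # Minimal usage sketch (assumptions: a local Ollama server is running
    # with the llama2 model pulled, and BaseLlmConfig accepts the
    # model/temperature/top_p/stream keyword arguments shown here; check
    # embedchain's config module for the exact signature).
    config = BaseLlmConfig(model="llama2", temperature=0.7, top_p=0.9, stream=False)
    llm = OllamaLlm(config=config)
    print(llm.get_llm_model_answer("Why is the sky blue?"))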