google.py

import logging
import os
from collections.abc import Generator
from typing import Any, Optional, Union

try:
    import google.generativeai as genai
except ImportError:
    raise ImportError(
        "GoogleLlm requires extra dependencies. Install with `pip install google-generativeai`"
    ) from None

from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm

logger = logging.getLogger(__name__)


@register_deserializable
class GoogleLlm(BaseLlm):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)
        # The API key may come from the config or from the environment.
        if not self.config.api_key and "GOOGLE_API_KEY" not in os.environ:
            raise ValueError("Please set the GOOGLE_API_KEY environment variable or pass it in the config.")
        api_key = self.config.api_key or os.getenv("GOOGLE_API_KEY")
        genai.configure(api_key=api_key)

    def get_llm_model_answer(self, prompt):
        if self.config.system_prompt:
            raise ValueError("GoogleLlm does not support `system_prompt`")
        return self._get_answer(prompt)

    def _get_answer(self, prompt: str) -> Union[str, Generator[Any, Any, None]]:
        model_name = self.config.model or "gemini-pro"
        logger.info(f"Using Google LLM model: {model_name}")
        model = genai.GenerativeModel(model_name=model_name)

        generation_config_params = {
            "candidate_count": 1,
            "max_output_tokens": self.config.max_tokens,
            "temperature": self.config.temperature or 0.5,
        }

        # Gemini accepts top_p in the inclusive range [0.0, 1.0].
        if 0.0 <= self.config.top_p <= 1.0:
            generation_config_params["top_p"] = self.config.top_p
        else:
            raise ValueError("`top_p` must be between 0.0 and 1.0 (inclusive)")

        generation_config = genai.types.GenerationConfig(**generation_config_params)

        response = model.generate_content(
            prompt,
            generation_config=generation_config,
            stream=self.config.stream,
        )
        if self.config.stream:
            # TODO: Implement streaming
            # For now, resolve() consumes the stream so the full text is available.
            response.resolve()
            return response.text
        else:
            return response.text
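
A minimal usage sketch, not part of the original module: it assumes google-generativeai is installed, a valid GOOGLE_API_KEY is set in the environment, and that BaseLlmConfig accepts the keyword arguments shown (model, temperature, top_p, max_tokens, stream), matching the attributes the class reads above.

if __name__ == "__main__":
    # Hypothetical smoke test; the values are illustrative, not library defaults.
    config = BaseLlmConfig(
        model="gemini-pro",
        temperature=0.5,
        top_p=0.9,
        max_tokens=1000,
        stream=False,
    )
    llm = GoogleLlm(config=config)
    print(llm.get_llm_model_answer("What is the capital of France?"))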