import json
from typing import Dict, List, Optional

try:
    from together import Together
except ImportError:
    raise ImportError("Together requires extra dependencies. Install with `pip install together`") from None

from mem0.llms.base import LLMBase
from mem0.configs.llms.base import BaseLlmConfig


class TogetherLLM(LLMBase):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        # Fall back to a default model when none is configured.
        if not self.config.model:
            self.config.model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
        self.client = Together()
    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from the API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: The processed response.
        """
        if tools:
            processed_response = {
                "content": response.choices[0].message.content,
                "tool_calls": [],
            }

            if response.choices[0].message.tool_calls:
                for tool_call in response.choices[0].message.tool_calls:
                    processed_response["tool_calls"].append(
                        {
                            "name": tool_call.function.name,
                            "arguments": json.loads(tool_call.function.arguments),
                        }
                    )

            return processed_response
        else:
            return response.choices[0].message.content
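
    # A minimal sketch of the dict _parse_response returns when tools are
    # passed. The tool name and arguments below are hypothetical, for
    # illustration only:
    #
    #     {
    #         "content": "Let me check the weather for you.",
    #         "tool_calls": [
    #             {"name": "get_weather", "arguments": {"city": "Paris"}},
    #         ],
    #     }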

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using Together AI.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to "text".
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str or dict: The generated response, or a dict of content and
            tool calls when tools are provided.
        """
        params = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }
        if response_format:
            params["response_format"] = response_format
        # tool_choice is only meaningful when tools are supplied.
        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = self.client.chat.completions.create(**params)
        return self._parse_response(response, tools)
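

# A minimal usage sketch, not part of the module proper. It assumes a valid
# TOGETHER_API_KEY is set in the environment (the Together() client reads it
# automatically) and that BaseLlmConfig accepts these keyword arguments, as it
# does elsewhere in mem0:
if __name__ == "__main__":
    config = BaseLlmConfig(model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.1)
    llm = TogetherLLM(config)
    reply = llm.generate_response(messages=[{"role": "user", "content": "Say hello in one sentence."}])
    print(reply)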