import pytest
from unittest.mock import Mock, patch

from mem0.llms import litellm
from mem0.configs.llms.base import BaseLlmConfig


@pytest.fixture
def mock_litellm():
    """Patch the litellm module referenced inside mem0.llms.litellm and yield the mock."""
    with patch('mem0.llms.litellm.litellm') as mock_litellm:
        yield mock_litellm


def test_generate_response_with_unsupported_model(mock_litellm):
    """A model that litellm reports as lacking function calling must raise ValueError."""
    config = BaseLlmConfig(model="unsupported-model", temperature=0.7, max_tokens=100, top_p=1)
    llm = litellm.LiteLLM(config)
    messages = [{"role": "user", "content": "Hello"}]

    # Simulate litellm reporting no function-calling support for this model.
    mock_litellm.supports_function_calling.return_value = False

    with pytest.raises(
        ValueError,
        match="Model 'unsupported-model' in litellm does not support function calling.",
    ):
        llm.generate_response(messages)


def test_generate_response_without_tools(mock_litellm):
    """Without tools, generate_response returns the plain message content string."""
    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
    llm = litellm.LiteLLM(config)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]

    mock_response = Mock()
    mock_response.choices = [Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    mock_litellm.completion.return_value = mock_response
    mock_litellm.supports_function_calling.return_value = True

    response = llm.generate_response(messages)

    # top_p asserted as 1 (was 1.0) for consistency with the config above and the
    # sibling with-tools test; Mock compares call kwargs with ==, so behavior is unchanged.
    mock_litellm.completion.assert_called_once_with(
        model="gpt-4o",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1,
    )
    assert response == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_litellm):
    """With tools supplied, generate_response returns content plus parsed tool calls."""
    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
    llm = litellm.LiteLLM(config)

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "add_memory",
                "description": "Add a memory",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "data": {"type": "string", "description": "Data to add to memory"}
                    },
                    "required": ["data"],
                },
            },
        }
    ]

    # Build a fake completion whose single choice carries one tool call.
    tool_call = Mock()
    tool_call.function.name = "add_memory"
    tool_call.function.arguments = '{"data": "Today is a sunny day."}'

    message = Mock()
    message.content = "I've added the memory for you."
    message.tool_calls = [tool_call]

    completion = Mock()
    completion.choices = [Mock(message=message)]

    mock_litellm.completion.return_value = completion
    mock_litellm.supports_function_calling.return_value = True

    response = llm.generate_response(messages, tools=tools)

    # The wrapper must forward the tool schema and request automatic tool choice.
    mock_litellm.completion.assert_called_once_with(
        model="gpt-4o",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1,
        tools=tools,
        tool_choice="auto",
    )

    assert response["content"] == "I've added the memory for you."
    assert len(response["tool_calls"]) == 1
    assert response["tool_calls"][0]["name"] == "add_memory"
    assert response["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}