# test_litellm.py — unit tests for mem0's LiteLLM wrapper.
  1. import pytest
  2. from unittest.mock import Mock, patch
  3. from mem0.llms import litellm
  4. from mem0.configs.llms.base import BaseLlmConfig
  5. @pytest.fixture
  6. def mock_litellm():
  7. with patch('mem0.llms.litellm.litellm') as mock_litellm:
  8. yield mock_litellm
  9. def test_generate_response_with_unsupported_model(mock_litellm):
  10. config = BaseLlmConfig(model="unsupported-model", temperature=0.7, max_tokens=100, top_p=1)
  11. llm = litellm.LiteLLM(config)
  12. messages = [{"role": "user", "content": "Hello"}]
  13. mock_litellm.supports_function_calling.return_value = False
  14. with pytest.raises(ValueError, match="Model 'unsupported-model' in litellm does not support function calling."):
  15. llm.generate_response(messages)
  16. def test_generate_response_without_tools(mock_litellm):
  17. config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
  18. llm = litellm.LiteLLM(config)
  19. messages = [
  20. {"role": "system", "content": "You are a helpful assistant."},
  21. {"role": "user", "content": "Hello, how are you?"}
  22. ]
  23. mock_response = Mock()
  24. mock_response.choices = [Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
  25. mock_litellm.completion.return_value = mock_response
  26. mock_litellm.supports_function_calling.return_value = True
  27. response = llm.generate_response(messages)
  28. mock_litellm.completion.assert_called_once_with(
  29. model="gpt-4o",
  30. messages=messages,
  31. temperature=0.7,
  32. max_tokens=100,
  33. top_p=1.0
  34. )
  35. assert response == "I'm doing well, thank you for asking!"
  36. def test_generate_response_with_tools(mock_litellm):
  37. config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
  38. llm = litellm.LiteLLM(config)
  39. messages = [
  40. {"role": "system", "content": "You are a helpful assistant."},
  41. {"role": "user", "content": "Add a new memory: Today is a sunny day."}
  42. ]
  43. tools = [
  44. {
  45. "type": "function",
  46. "function": {
  47. "name": "add_memory",
  48. "description": "Add a memory",
  49. "parameters": {
  50. "type": "object",
  51. "properties": {
  52. "data": {"type": "string", "description": "Data to add to memory"}
  53. },
  54. "required": ["data"],
  55. },
  56. },
  57. }
  58. ]
  59. mock_response = Mock()
  60. mock_message = Mock()
  61. mock_message.content = "I've added the memory for you."
  62. mock_tool_call = Mock()
  63. mock_tool_call.function.name = "add_memory"
  64. mock_tool_call.function.arguments = '{"data": "Today is a sunny day."}'
  65. mock_message.tool_calls = [mock_tool_call]
  66. mock_response.choices = [Mock(message=mock_message)]
  67. mock_litellm.completion.return_value = mock_response
  68. mock_litellm.supports_function_calling.return_value = True
  69. response = llm.generate_response(messages, tools=tools)
  70. mock_litellm.completion.assert_called_once_with(
  71. model="gpt-4o",
  72. messages=messages,
  73. temperature=0.7,
  74. max_tokens=100,
  75. top_p=1,
  76. tools=tools,
  77. tool_choice="auto"
  78. )
  79. assert response["content"] == "I've added the memory for you."
  80. assert len(response["tool_calls"]) == 1
  81. assert response["tool_calls"][0]["name"] == "add_memory"
  82. assert response["tool_calls"][0]["arguments"] == {'data': 'Today is a sunny day.'}