"""Unit tests for the GPT4ALLLlm wrapper in embedchain."""
import pytest
from langchain_community.llms.gpt4all import GPT4All as LangchainGPT4All

from embedchain.config import BaseLlmConfig
from embedchain.llm.gpt4all import GPT4ALLLlm
  5. @pytest.fixture
  6. def config():
  7. config = BaseLlmConfig(
  8. temperature=0.7,
  9. max_tokens=50,
  10. top_p=0.8,
  11. stream=False,
  12. system_prompt="System prompt",
  13. model="orca-mini-3b-gguf2-q4_0.gguf",
  14. )
  15. yield config
  16. @pytest.fixture
  17. def gpt4all_with_config(config):
  18. return GPT4ALLLlm(config=config)
  19. @pytest.fixture
  20. def gpt4all_without_config():
  21. return GPT4ALLLlm()
  22. def test_gpt4all_init_with_config(config, gpt4all_with_config):
  23. assert gpt4all_with_config.config.temperature == config.temperature
  24. assert gpt4all_with_config.config.max_tokens == config.max_tokens
  25. assert gpt4all_with_config.config.top_p == config.top_p
  26. assert gpt4all_with_config.config.stream == config.stream
  27. assert gpt4all_with_config.config.system_prompt == config.system_prompt
  28. assert gpt4all_with_config.config.model == config.model
  29. assert isinstance(gpt4all_with_config.instance, LangchainGPT4All)
  30. def test_gpt4all_init_without_config(gpt4all_without_config):
  31. assert gpt4all_without_config.config.model == "orca-mini-3b-gguf2-q4_0.gguf"
  32. assert isinstance(gpt4all_without_config.instance, LangchainGPT4All)
  33. def test_get_llm_model_answer(mocker, gpt4all_with_config):
  34. test_query = "Test query"
  35. test_answer = "Test answer"
  36. mocked_get_answer = mocker.patch("embedchain.llm.gpt4all.GPT4ALLLlm._get_answer", return_value=test_answer)
  37. answer = gpt4all_with_config.get_llm_model_answer(test_query)
  38. assert answer == test_answer
  39. mocked_get_answer.assert_called_once_with(prompt=test_query, config=gpt4all_with_config.config)
  40. def test_gpt4all_model_switching(gpt4all_with_config):
  41. with pytest.raises(RuntimeError, match="GPT4ALLLlm does not support switching models at runtime."):
  42. gpt4all_with_config._get_answer("Test prompt", BaseLlmConfig(model="new_model"))