- import pytest
-
- from embedchain.config import BaseLlmConfig
- from embedchain.llm.ollama import OllamaLlm
-
-
- @pytest.fixture
- def ollama_llm_config():
-     config = BaseLlmConfig(model="llama2", temperature=0.7, top_p=0.8, stream=True, system_prompt=None)
-     yield config
-
-
- def test_get_llm_model_answer(ollama_llm_config, mocker):
-     mocker.patch("embedchain.llm.ollama.Client.list", return_value={"models": [{"name": "llama2"}]})
-     mocker.patch("embedchain.llm.ollama.OllamaLlm._get_answer", return_value="Test answer")
-
-     llm = OllamaLlm(ollama_llm_config)
-     answer = llm.get_llm_model_answer("Test query")
-
-     assert answer == "Test answer"
-
-
- def test_get_answer_mocked_ollama(ollama_llm_config, mocker):
-     mocker.patch("embedchain.llm.ollama.Client.list", return_value={"models": [{"name": "llama2"}]})
-     mocked_ollama = mocker.patch("embedchain.llm.ollama.Ollama")
-     mock_instance = mocked_ollama.return_value
-     mock_instance.invoke.return_value = "Mocked answer"
-
-     llm = OllamaLlm(ollama_llm_config)
-     prompt = "Test query"
-     answer = llm.get_llm_model_answer(prompt)
-
-     assert answer == "Mocked answer"