@@ -28,9 +28,9 @@ def test_get_llm_model_answer(azure_openai_llm):


 def test_get_answer(azure_openai_llm):
-    with patch("langchain_community.chat_models.AzureChatOpenAI") as mock_chat:
+    with patch("langchain_openai.AzureChatOpenAI") as mock_chat:
         mock_chat_instance = mock_chat.return_value
-        mock_chat_instance.return_value = MagicMock(content="Test Response")
+        mock_chat_instance.invoke.return_value = MagicMock(content="Test Response")

         prompt = "Test Prompt"
         response = azure_openai_llm._get_answer(prompt, azure_openai_llm.config)
@@ -38,15 +38,12 @@ def test_get_answer(azure_openai_llm):
         assert response == "Test Response"
         mock_chat.assert_called_once_with(
             deployment_name=azure_openai_llm.config.deployment_name,
-            openai_api_version="2023-05-15",
+            openai_api_version="2024-02-01",
             model_name=azure_openai_llm.config.model or "gpt-3.5-turbo",
             temperature=azure_openai_llm.config.temperature,
             max_tokens=azure_openai_llm.config.max_tokens,
             streaming=azure_openai_llm.config.stream,
         )
-        mock_chat_instance.assert_called_once_with(
-            azure_openai_llm._get_messages(prompt, system_prompt=azure_openai_llm.config.system_prompt)
-        )


 def test_get_messages(azure_openai_llm):
@@ -65,6 +62,7 @@ def test_when_no_deployment_name_provided():
         llm = AzureOpenAILlm(config)
         llm.get_llm_model_answer("Test Prompt")

+
 def test_with_api_version():
     config = BaseLlmConfig(
         deployment_name="azure_deployment",
@@ -75,8 +73,7 @@ def test_with_api_version():
         api_version="2024-02-01",
     )

-    with patch("langchain_community.chat_models.AzureChatOpenAI") as mock_chat:
-
+    with patch("langchain_openai.AzureChatOpenAI") as mock_chat:
         llm = AzureOpenAILlm(config)
         llm.get_llm_model_answer("Test Prompt")

@@ -87,4 +84,4 @@ def test_with_api_version():
             temperature=0.7,
             max_tokens=50,
             streaming=False,
-        )
+        )