
[Bug Fix] Make claude-3-opus model work (#1331)

Deshraj Yadav, 1 year ago
parent
commit
d00a2085d5
5 changed files with 10 additions and 26 deletions
  1. Makefile (+1, -1)
  2. embedchain/llm/anthropic.py (+6, -2)
  3. embedchain/vectordb/qdrant.py (+0, -1)
  4. pyproject.toml (+2, -2)
  5. tests/llm/test_anthrophic.py (+1, -20)

+ 1 - 1
Makefile

@@ -11,7 +11,7 @@ install:
 
 install_all:
 	poetry install --all-extras
-	poetry run pip install pinecone-text pinecone-client
+	poetry run pip install pinecone-text pinecone-client langchain-anthropic
 
 install_es:
 	poetry install --extras elasticsearch

+ 6 - 2
embedchain/llm/anthropic.py

@@ -2,6 +2,11 @@ import logging
 import os
 from typing import Optional
 
+try:
+    from langchain_anthropic import ChatAnthropic
+except ImportError:
+    raise ImportError("Please install the langchain-anthropic package by running `pip install langchain-anthropic`.")
+
 from embedchain.config import BaseLlmConfig
 from embedchain.helpers.json_serializable import register_deserializable
 from embedchain.llm.base import BaseLlm
@@ -21,10 +26,9 @@ class AnthropicLlm(BaseLlm):
 
     @staticmethod
     def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
-        from langchain_community.chat_models import ChatAnthropic
 
         chat = ChatAnthropic(
-            anthropic_api_key=os.environ["ANTHROPIC_API_KEY"], temperature=config.temperature, model=config.model
+            anthropic_api_key=os.environ["ANTHROPIC_API_KEY"], temperature=config.temperature, model_name=config.model
         )
 
         if config.max_tokens and config.max_tokens != 1000:
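
For context, a minimal sketch of the call path this fix enables. It is not part of the diff: the model id `claude-3-opus-20240229` and the temperature are illustrative stand-ins for what embedchain normally pulls from `BaseLlmConfig`.

import os

from langchain.schema import HumanMessage
# ChatAnthropic now comes from the dedicated langchain-anthropic package,
# imported at module level behind the try/except shown above.
from langchain_anthropic import ChatAnthropic

chat = ChatAnthropic(
    anthropic_api_key=os.environ["ANTHROPIC_API_KEY"],
    temperature=0.5,                      # illustrative; embedchain passes config.temperature
    model_name="claude-3-opus-20240229",  # model_name= is the kwarg this fix switches to
)
answer = chat.invoke([HumanMessage(content="Hello")]).content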

+ 0 - 1
embedchain/vectordb/qdrant.py

@@ -1,6 +1,5 @@
 import copy
 import os
-import uuid
 from typing import Any, Optional, Union
 
 try:

+ 2 - 2
pyproject.toml

@@ -1,7 +1,7 @@
 [tool.poetry]
 name = "embedchain"
-version = "0.1.98"
-description = "Simplest open source retrieval(RAG) framework"
+version = "0.1.99"
+description = "Simplest open source retrieval (RAG) framework"
 authors = [
     "Taranjeet Singh <taranjeet@embedchain.ai>",
     "Deshraj Yadav <deshraj@embedchain.ai>",

+ 1 - 20
tests/llm/test_anthrophic.py

@@ -1,5 +1,5 @@
 import os
-from unittest.mock import MagicMock, patch
+from unittest.mock import patch
 
 import pytest
 from langchain.schema import HumanMessage, SystemMessage
@@ -23,25 +23,6 @@ def test_get_llm_model_answer(anthropic_llm):
         mock_method.assert_called_once_with(prompt=prompt, config=anthropic_llm.config)
 
 
-def test_get_answer(anthropic_llm):
-    with patch("langchain_community.chat_models.ChatAnthropic") as mock_chat:
-        mock_chat_instance = mock_chat.return_value
-        mock_chat_instance.return_value = MagicMock(content="Test Response")
-
-        prompt = "Test Prompt"
-        response = anthropic_llm._get_answer(prompt, anthropic_llm.config)
-
-        assert response == "Test Response"
-        mock_chat.assert_called_once_with(
-            anthropic_api_key="test_api_key",
-            temperature=anthropic_llm.config.temperature,
-            model=anthropic_llm.config.model,
-        )
-        mock_chat_instance.assert_called_once_with(
-            anthropic_llm._get_messages(prompt, system_prompt=anthropic_llm.config.system_prompt)
-        )
-
-
 def test_get_messages(anthropic_llm):
     prompt = "Test Prompt"
     system_prompt = "Test System Prompt"
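
The removed `test_get_answer` patched `langchain_community.chat_models.ChatAnthropic`, a path that no longer applies once the import moves to the top of `embedchain/llm/anthropic.py`. A hypothetical replacement (not part of this commit) would mirror the deleted test but patch the symbol at its new import site; the patch target below is an assumption based on that move.

from unittest.mock import MagicMock, patch


def test_get_answer(anthropic_llm):
    # Patch ChatAnthropic where anthropic.py now looks it up (module top level).
    with patch("embedchain.llm.anthropic.ChatAnthropic") as mock_chat:
        mock_chat.return_value.return_value = MagicMock(content="Test Response")

        response = anthropic_llm._get_answer("Test Prompt", anthropic_llm.config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(
            anthropic_api_key="test_api_key",  # assumes the fixture exports ANTHROPIC_API_KEY
            temperature=anthropic_llm.config.temperature,
            model_name=anthropic_llm.config.model,  # model_name=, matching the fix
        )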