
Feature: Custom Ollama endpoint base_url (#1301)

Felipe Amaral · 1 year ago · commit 6078738d34

+ 1 - 0
configs/ollama.yaml

@@ -5,6 +5,7 @@ llm:
     temperature: 0.5
     top_p: 1
     stream: true
+    base_url: http://localhost:11434
 
 embedder:
   provider: huggingface
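
With this line in place, the YAML config can point the Ollama LLM at any reachable host rather than the client's built-in default. A minimal usage sketch, assuming embedchain's `App.from_config` loader (the keyword argument name has varied across embedchain versions, so treat `config_path` as an assumption):

```python
from embedchain import App

# Load the YAML above; base_url tells the Ollama client which host to
# call instead of relying on the client's built-in default endpoint.
app = App.from_config(config_path="configs/ollama.yaml")

print(app.query("What is Embedchain?"))
```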

+ 2 - 0
embedchain/config/llm/base.py

@@ -96,6 +96,7 @@ class BaseLlmConfig(BaseConfig):
         endpoint: Optional[str] = None,
         model_kwargs: Optional[dict[str, Any]] = None,
         local: Optional[bool] = False,
+        base_url: Optional[str] = None,
     ):
         """
         Initializes a configuration class instance for the LLM.
@@ -169,6 +170,7 @@ class BaseLlmConfig(BaseConfig):
         self.endpoint = endpoint
         self.model_kwargs = model_kwargs
         self.local = local
+        self.base_url = base_url
 
         if isinstance(prompt, str):
             prompt = Template(prompt)
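
The new keyword is stored on the config object as-is (defaulting to `None`), so it can also be set programmatically. A minimal sketch, assuming `BaseLlmConfig` is re-exported from `embedchain.config`:

```python
from embedchain.config import BaseLlmConfig

# base_url defaults to None; providers that do not use it ignore it.
config = BaseLlmConfig(
    temperature=0.5,
    top_p=1,
    stream=True,
    base_url="http://192.168.1.50:11434",  # hypothetical remote Ollama host
)

assert config.base_url == "http://192.168.1.50:11434"
```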

+ 1 - 0
embedchain/llm/ollama.py

@@ -31,6 +31,7 @@ class OllamaLlm(BaseLlm):
             temperature=config.temperature,
             top_p=config.top_p,
             callback_manager=CallbackManager(callback_manager),
+            base_url=config.base_url,
         )
 
         return llm.invoke(prompt)
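
The config value is forwarded straight to LangChain's `Ollama` wrapper, which accepts a `base_url` of its own and falls back to `http://localhost:11434` when none is given. A standalone sketch of that underlying call, assuming the `langchain_community` package is installed and an Ollama server is running at the named address:

```python
from langchain_community.llms import Ollama

# Point the LangChain wrapper at an explicit Ollama endpoint; omit
# base_url to use the wrapper's default of http://localhost:11434.
llm = Ollama(model="llama2", base_url="http://localhost:11434")

print(llm.invoke("Say hello in one word."))
```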

+ 1 - 0
embedchain/utils/misc.py

@@ -427,6 +427,7 @@ def validate_config(config_data):
                     Optional("endpoint"): str,
                     Optional("model_kwargs"): dict,
                     Optional("local"): bool,
+                    Optional("base_url"): str,
                 },
             },
             Optional("vectordb"): {