
Merge pull request #97 from bsormagec/master

Add Azure OpenAI and OpenRouter Provider Support
Ashu 3 months ago
parent
commit
f17e5bca46

+ 213 - 8
chrome-extension/src/background/agent/helper.ts

@@ -1,5 +1,5 @@
 import { type ProviderConfig, type ModelConfig, ProviderTypeEnum } from '@extension/storage';
-import { ChatOpenAI } from '@langchain/openai';
+import { ChatOpenAI, AzureChatOpenAI } from '@langchain/openai';
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
 import { ChatXAI } from '@langchain/xai';
@@ -13,25 +13,56 @@ function isOpenAIOModel(modelName: string): boolean {
   return modelName.startsWith('openai/o') || modelName.startsWith('o');
 }
 
-function createOpenAIChatModel(providerConfig: ProviderConfig, modelConfig: ModelConfig): BaseChatModel {
+function createOpenAIChatModel(
+  providerConfig: ProviderConfig,
+  modelConfig: ModelConfig,
+  // Add optional extra fetch options for headers etc.
+  extraFetchOptions?: { headers?: Record<string, string> },
+): BaseChatModel {
   const args: {
     model: string;
     apiKey?: string;
-    configuration?: Record<string, unknown>;
-    modelKwargs?: { max_completion_tokens: number };
+    // Configuration should align with ClientOptions from @langchain/openai
+    configuration?: {
+      baseURL?: string;
+      defaultHeaders?: Record<string, string>;
+      // Add other ClientOptions here if needed, e.g.:
+      // dangerouslyAllowBrowser?: boolean;
+    };
+    modelKwargs?: {
+      max_completion_tokens: number;
+      reasoning_effort?: 'low' | 'medium' | 'high';
+    };
     topP?: number;
     temperature?: number;
     maxTokens?: number;
   } = {
     model: modelConfig.modelName,
     apiKey: providerConfig.apiKey,
+    // Initialize configuration object
+    configuration: {},
   };
 
   if (providerConfig.baseUrl) {
-    args.configuration = {
-      baseURL: providerConfig.baseUrl,
+    // Set baseURL inside configuration
+    args.configuration!.baseURL = providerConfig.baseUrl;
+  }
+
+  // Always add custom headers for OpenRouter to identify Nanobrowser
+  if (providerConfig.type === ProviderTypeEnum.OpenRouter) {
+    args.configuration!.defaultHeaders = {
+      ...(args.configuration!.defaultHeaders || {}),
+      'HTTP-Referer': 'https://nanobrowser.ai',
+      'X-Title': 'Nanobrowser',
+      ...(extraFetchOptions?.headers || {}),
+    };
+  } else if (extraFetchOptions?.headers) {
+    args.configuration!.defaultHeaders = {
+      ...(args.configuration!.defaultHeaders || {}),
+      ...extraFetchOptions.headers,
     };
   }
+
   // custom provider may have no api key
   if (providerConfig.apiKey) {
     args.apiKey = providerConfig.apiKey;
@@ -42,22 +73,53 @@ function createOpenAIChatModel(providerConfig: ProviderConfig, modelConfig: Mode
     args.modelKwargs = {
       max_completion_tokens: maxTokens,
     };
+
+    // Add reasoning_effort parameter for o-series models if specified
+    if (modelConfig.reasoningEffort) {
+      args.modelKwargs.reasoning_effort = modelConfig.reasoningEffort;
+    }
   } else {
     args.topP = (modelConfig.parameters?.topP ?? 0.1) as number;
     args.temperature = (modelConfig.parameters?.temperature ?? 0.1) as number;
     args.maxTokens = maxTokens;
   }
+  // Log args passed to the ChatOpenAI constructor, with the API key redacted
+  console.log('[createOpenAIChatModel] Args passed to new ChatOpenAI:', { ...args, apiKey: args.apiKey ? '***' : undefined });
   return new ChatOpenAI(args);
 }
 
+// Function to extract instance name from Azure endpoint URL
+function extractInstanceNameFromUrl(url: string): string | null {
+  try {
+    const parsedUrl = new URL(url);
+    const hostnameParts = parsedUrl.hostname.split('.');
+    // Expecting format like instance-name.openai.azure.com
+    if (hostnameParts.length >= 4 && hostnameParts[1] === 'openai' && hostnameParts[2] === 'azure') {
+      return hostnameParts[0];
+    }
+  } catch (e) {
+    console.error('Error parsing Azure endpoint URL:', e);
+  }
+  return null;
+}
+
+// Function to check if a provider ID is an Azure provider
+function isAzureProvider(providerId: string): boolean {
+  return providerId === ProviderTypeEnum.AzureOpenAI || providerId.startsWith(`${ProviderTypeEnum.AzureOpenAI}_`);
+}
+
 // create a chat model based on the provider and model configuration
 export function createChatModel(providerConfig: ProviderConfig, modelConfig: ModelConfig): BaseChatModel {
   const temperature = (modelConfig.parameters?.temperature ?? 0.1) as number;
   const topP = (modelConfig.parameters?.topP ?? 0.1) as number;
 
+  // Check if the provider is Azure, either the built-in ID or a custom one (e.g. azure_openai_2)
+  const isAzure = isAzureProvider(modelConfig.provider);
+
   switch (modelConfig.provider) {
     case ProviderTypeEnum.OpenAI: {
-      return createOpenAIChatModel(providerConfig, modelConfig);
+      // Call helper without extra options
+      return createOpenAIChatModel(providerConfig, modelConfig, undefined);
     }
     case ProviderTypeEnum.Anthropic: {
       const args = {
@@ -125,9 +187,152 @@ export function createChatModel(providerConfig: ProviderConfig, modelConfig: Mod
       };
       return new ChatOllama(args);
     }
+    case ProviderTypeEnum.AzureOpenAI: {
+      // Validate necessary fields first
+      if (
+        !providerConfig.baseUrl ||
+        !providerConfig.azureDeploymentNames ||
+        providerConfig.azureDeploymentNames.length === 0 ||
+        !providerConfig.azureApiVersion ||
+        !providerConfig.apiKey
+      ) {
+        throw new Error(
+          'Azure configuration is incomplete. Endpoint, Deployment Name, API Version, and API Key are required. Please check settings.',
+        );
+      }
+
+      // Instead of always using the first deployment name, use the model name from modelConfig
+      // which contains the actual model selected in the UI
+      const deploymentName = modelConfig.modelName;
+
+      // Validate that the selected model exists in the configured deployments
+      if (!providerConfig.azureDeploymentNames.includes(deploymentName)) {
+        console.warn(
+          `[createChatModel] Selected deployment "${deploymentName}" not found in available deployments. ` +
+            `Available: ${JSON.stringify(providerConfig.azureDeploymentNames)}. Using the model anyway.`,
+        );
+      }
+
+      // Extract instance name from the endpoint URL
+      const instanceName = extractInstanceNameFromUrl(providerConfig.baseUrl);
+      if (!instanceName) {
+        throw new Error(
+          `Could not extract Instance Name from Azure Endpoint URL: ${providerConfig.baseUrl}. Expected format like https://<your-instance-name>.openai.azure.com/`,
+        );
+      }
+
+      // Check if the Azure deployment is an "o" series reasoning model (o1, o3-mini, etc.)
+      const isOSeriesModel = isOpenAIOModel(deploymentName);
+
+      // Use AzureChatOpenAI with specific parameters
+      const args = {
+        azureOpenAIApiInstanceName: instanceName, // Derived from endpoint
+        azureOpenAIApiDeploymentName: deploymentName,
+        azureOpenAIApiKey: providerConfig.apiKey,
+        azureOpenAIApiVersion: providerConfig.azureApiVersion,
+        // For Azure, the model name should be the deployment name itself
+        model: deploymentName, // Set model = deployment name to fix Azure requests
+        // For O series models, use modelKwargs instead of temperature/topP
+        ...(isOSeriesModel
+          ? {
+              modelKwargs: {
+                max_completion_tokens: maxTokens,
+                // Add reasoning_effort parameter for Azure o-series models if specified
+                ...(modelConfig.reasoningEffort ? { reasoning_effort: modelConfig.reasoningEffort } : {}),
+              },
+            }
+          : {
+              temperature,
+              topP,
+              maxTokens,
+            }),
+        // DO NOT pass baseUrl or configuration here
+      };
+      console.log('[createChatModel] Azure args passed to AzureChatOpenAI:', { ...args, azureOpenAIApiKey: '***' });
+      return new AzureChatOpenAI(args);
+    }
+    case ProviderTypeEnum.OpenRouter: {
+      // Call the helper, passing the OpenRouter identification headers via the
+      // third argument (kept consistent with the defaults set inside the helper)
+      console.log('[createChatModel] Calling createOpenAIChatModel for OpenRouter');
+      return createOpenAIChatModel(providerConfig, modelConfig, {
+        headers: {
+          'HTTP-Referer': 'https://nanobrowser.ai',
+          'X-Title': 'Nanobrowser',
+        },
+      });
+    }
     default: {
+      // Check if this is a custom Azure provider (azure_openai_X)
+      if (isAzure) {
+        // Validate necessary fields first
+        if (
+          !providerConfig.baseUrl ||
+          !providerConfig.azureDeploymentNames ||
+          providerConfig.azureDeploymentNames.length === 0 ||
+          !providerConfig.azureApiVersion ||
+          !providerConfig.apiKey
+        ) {
+          throw new Error(
+            'Azure configuration is incomplete. Endpoint, Deployment Name, API Version, and API Key are required. Please check settings.',
+          );
+        }
+
+        // Instead of always using the first deployment name, use the model name from modelConfig
+        // which contains the actual model selected in the UI
+        const deploymentName = modelConfig.modelName;
+
+        // Validate that the selected model exists in the configured deployments
+        if (!providerConfig.azureDeploymentNames.includes(deploymentName)) {
+          console.warn(
+            `[createChatModel] Selected deployment "${deploymentName}" not found in available deployments. ` +
+              `Available: ${JSON.stringify(providerConfig.azureDeploymentNames)}. Using the model anyway.`,
+          );
+        }
+
+        // Extract instance name from the endpoint URL
+        const instanceName = extractInstanceNameFromUrl(providerConfig.baseUrl);
+        if (!instanceName) {
+          throw new Error(
+            `Could not extract Instance Name from Azure Endpoint URL: ${providerConfig.baseUrl}. Expected format like https://<your-instance-name>.openai.azure.com/`,
+          );
+        }
+
+        // Check if the Azure deployment is an "o" series reasoning model (o1, o3-mini, etc.)
+        const isOSeriesModel = isOpenAIOModel(deploymentName);
+
+        // Use AzureChatOpenAI with specific parameters
+        const args = {
+          azureOpenAIApiInstanceName: instanceName, // Derived from endpoint
+          azureOpenAIApiDeploymentName: deploymentName,
+          azureOpenAIApiKey: providerConfig.apiKey,
+          azureOpenAIApiVersion: providerConfig.azureApiVersion,
+          // For Azure, the model name should be the deployment name itself
+          model: deploymentName, // Set model = deployment name to fix Azure requests
+          // For O series models, use modelKwargs instead of temperature/topP
+          ...(isOSeriesModel
+            ? {
+                modelKwargs: {
+                  max_completion_tokens: maxTokens,
+                  // Add reasoning_effort parameter for Azure o-series models if specified
+                  ...(modelConfig.reasoningEffort ? { reasoning_effort: modelConfig.reasoningEffort } : {}),
+                },
+              }
+            : {
+                temperature,
+                topP,
+                maxTokens,
+              }),
+          // DO NOT pass baseUrl or configuration here
+        };
+        console.log('[createChatModel] Azure args (custom ID) passed to AzureChatOpenAI:', { ...args, azureOpenAIApiKey: '***' });
+        return new AzureChatOpenAI(args);
+      }
+
+      // If not Azure, fall through to the custom-provider path:
       // by default, we assume it's an OpenAI-compatible provider
-      return createOpenAIChatModel(providerConfig, modelConfig);
+      // Pass undefined for extraFetchOptions for default/custom cases
+      console.log('[createChatModel] Calling createOpenAIChatModel for default/custom provider');
+      return createOpenAIChatModel(providerConfig, modelConfig, undefined);
     }
   }
 }

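For reference, here is a standalone sketch of the instance-name rule used by extractInstanceNameFromUrl above (the helper itself is not exported; the sample URLs are hypothetical):

function extractInstanceName(url: string): string | null {
  try {
    // The hostname of https://my-instance.openai.azure.com/ splits into
    // ['my-instance', 'openai', 'azure', 'com']
    const parts = new URL(url).hostname.split('.');
    if (parts.length >= 4 && parts[1] === 'openai' && parts[2] === 'azure') {
      return parts[0];
    }
  } catch {
    // Invalid URL: fall through and return null
  }
  return null;
}

console.log(extractInstanceName('https://my-instance.openai.azure.com/')); // 'my-instance'
console.log(extractInstanceName('https://example.com/')); // null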
+ 1 - 2
chrome-extension/src/background/dom/views.ts

@@ -1,5 +1,4 @@
-import type { ViewportInfo, CoordinateSet } from './history/view';
-import type { HashedDomElement } from './history/view';
+import type { ViewportInfo, CoordinateSet, HashedDomElement } from './history/view';
 import { HistoryTreeProcessor } from './history/service';
 
 export abstract class DOMBaseNode {

+ 19 - 5
chrome-extension/src/background/index.ts

@@ -5,7 +5,7 @@ import { Executor } from './agent/executor';
 import { createLogger } from './log';
 import { ExecutionState } from './agent/event/types';
 import { createChatModel } from './agent/helper';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const logger = createLogger('background');
 
@@ -74,7 +74,14 @@ chrome.tabs.onRemoved.addListener(tabId => {
 
 logger.info('background loaded');
 
-// Setup connection listener
+// Listen for one-off messages (e.g., from the options page)
+chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
+  // Handle message types here as they are added in the future.
+  // Returning nothing (or false) tells Chrome that sendResponse
+  // will not be called asynchronously.
+});
+
+// Setup connection listener for long-lived connections (e.g., side panel)
 chrome.runtime.onConnect.addListener(port => {
   if (port.name === 'side-panel-connection') {
     currentPort = port;
@@ -145,6 +152,7 @@ chrome.runtime.onConnect.addListener(port => {
             await currentExecutor.pause();
             return port.postMessage({ type: 'success' });
           }
+
           default:
             return port.postMessage({ type: 'error', error: 'Unknown message type' });
         }
@@ -182,18 +190,24 @@ async function setupExecutor(taskId: string, task: string, browserContext: Brows
   if (!navigatorModel) {
     throw new Error('Please choose a model for the navigator in the settings first');
   }
-  const navigatorLLM = createChatModel(providers[navigatorModel.provider], navigatorModel);
+  // Resolve the provider config used for the navigator
+  const navigatorProviderConfig = providers[navigatorModel.provider];
+  const navigatorLLM = createChatModel(navigatorProviderConfig, navigatorModel);
 
   let plannerLLM: BaseChatModel | null = null;
   const plannerModel = agentModels[AgentNameEnum.Planner];
   if (plannerModel) {
-    plannerLLM = createChatModel(providers[plannerModel.provider], plannerModel);
+    // Resolve the provider config used for the planner
+    const plannerProviderConfig = providers[plannerModel.provider];
+    plannerLLM = createChatModel(plannerProviderConfig, plannerModel);
   }
 
   let validatorLLM: BaseChatModel | null = null;
   const validatorModel = agentModels[AgentNameEnum.Validator];
   if (validatorModel) {
-    validatorLLM = createChatModel(providers[validatorModel.provider], validatorModel);
+    // Resolve the provider config used for the validator
+    const validatorProviderConfig = providers[validatorModel.provider];
+    validatorLLM = createChatModel(validatorProviderConfig, validatorModel);
   }
 
   const generalSettings = await generalSettingsStore.getSettings();

+ 1 - 0
packages/storage/lib/settings/agentModels.ts

@@ -10,6 +10,7 @@ export interface ModelConfig {
   provider: string;
   modelName: string;
   parameters?: Record<string, unknown>;
+  reasoningEffort?: 'low' | 'medium' | 'high'; // For o-series models (OpenAI and Azure)
 }
 
 // Interface for storing multiple agent model configurations

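A minimal sketch of how the new field is meant to be populated (hypothetical values; ModelConfig is exported from @extension/storage, as imported in helper.ts):

import type { ModelConfig } from '@extension/storage';

// Hypothetical planner config for an o-series model; helper.ts forwards
// reasoningEffort as reasoning_effort only when isOpenAIOModel(modelName) matches
const plannerModel: ModelConfig = {
  provider: 'openai',
  modelName: 'o3-mini',
  reasoningEffort: 'medium',
};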
+ 111 - 15
packages/storage/lib/settings/llmProviders.ts

@@ -8,9 +8,12 @@ export interface ProviderConfig {
   name?: string; // Display name in the options
   type?: ProviderTypeEnum; // Help to decide which LangChain ChatModel package to use
   apiKey: string; // Must be provided, but may be empty for local models
-  baseUrl?: string; // Optional base URL if provided
-  modelNames?: string[]; // Chosen model names, if not provided use hardcoded names from llmProviderModelNames
+  baseUrl?: string; // Optional base URL; for Azure this is the endpoint
+  modelNames?: string[]; // Chosen model names (NOT used for Azure OpenAI)
   createdAt?: number; // Timestamp in milliseconds when the provider was created
+  // Azure Specific Fields:
+  azureDeploymentNames?: string[]; // Azure deployment names array
+  azureApiVersion?: string;
 }
 
 // Interface for storing multiple LLM provider configurations
@@ -41,6 +44,17 @@ const storage = createStorage<LLMKeyRecord>(
 // Helper function to determine provider type from provider name
 // Make sure to update this function if you add a new provider type
 export function getProviderTypeByProviderId(providerId: string): ProviderTypeEnum {
+  // Check if this is an Azure provider (either the main one or one with a custom ID)
+  if (providerId === ProviderTypeEnum.AzureOpenAI) {
+    return ProviderTypeEnum.AzureOpenAI;
+  }
+
+  // Handle custom Azure providers with IDs like azure_openai_2
+  if (typeof providerId === 'string' && providerId.startsWith(`${ProviderTypeEnum.AzureOpenAI}_`)) {
+    return ProviderTypeEnum.AzureOpenAI;
+  }
+
+  // Handle standard provider types
   switch (providerId) {
     case ProviderTypeEnum.OpenAI:
     case ProviderTypeEnum.Anthropic:
@@ -48,6 +62,7 @@ export function getProviderTypeByProviderId(providerId: string): ProviderTypeEnu
     case ProviderTypeEnum.Gemini:
     case ProviderTypeEnum.Grok:
     case ProviderTypeEnum.Ollama:
+    case ProviderTypeEnum.OpenRouter:
       return providerId;
     default:
       return ProviderTypeEnum.CustomOpenAI;
@@ -70,13 +85,16 @@ export function getDefaultDisplayNameFromProviderId(providerId: string): string
       return 'Grok';
     case ProviderTypeEnum.Ollama:
       return 'Ollama';
+    case ProviderTypeEnum.AzureOpenAI:
+      return 'Azure OpenAI';
+    case ProviderTypeEnum.OpenRouter:
+      return 'OpenRouter';
     default:
       return providerId; // Use the provider id as display name for custom providers by default
   }
 }
 
 // Get default configuration for built-in providers
-// Make sure to update this function if you add a new provider type
 export function getDefaultProviderConfig(providerId: string): ProviderConfig {
   switch (providerId) {
     case ProviderTypeEnum.OpenAI:
@@ -84,10 +102,12 @@ export function getDefaultProviderConfig(providerId: string): ProviderConfig {
     case ProviderTypeEnum.DeepSeek:
     case ProviderTypeEnum.Gemini:
     case ProviderTypeEnum.Grok:
+    case ProviderTypeEnum.OpenRouter: // OpenRouter uses modelNames
       return {
         apiKey: '',
         name: getDefaultDisplayNameFromProviderId(providerId),
         type: providerId,
+        baseUrl: providerId === ProviderTypeEnum.OpenRouter ? 'https://openrouter.ai/api/v1' : undefined,
         modelNames: [...(llmProviderModelNames[providerId] || [])],
         createdAt: Date.now(),
       };
@@ -97,17 +117,28 @@ export function getDefaultProviderConfig(providerId: string): ProviderConfig {
         apiKey: 'ollama', // Set default API key for Ollama
         name: getDefaultDisplayNameFromProviderId(ProviderTypeEnum.Ollama),
         type: ProviderTypeEnum.Ollama,
-        modelNames: [],
+        modelNames: [], // Ollama uses modelNames (user adds them)
         baseUrl: 'http://localhost:11434',
         createdAt: Date.now(),
       };
-    default:
+    case ProviderTypeEnum.AzureOpenAI:
+      return {
+        apiKey: '', // User needs to provide API Key
+        name: getDefaultDisplayNameFromProviderId(ProviderTypeEnum.AzureOpenAI),
+        type: ProviderTypeEnum.AzureOpenAI,
+        baseUrl: '', // User needs to provide Azure endpoint
+        // modelNames: [], // Not used for Azure configuration
+        azureDeploymentNames: [], // Azure deployment names
+        azureApiVersion: '2024-02-15-preview', // Provide a common default API version
+        createdAt: Date.now(),
+      };
+    default: // Handles CustomOpenAI
       return {
         apiKey: '',
         name: getDefaultDisplayNameFromProviderId(providerId),
         type: ProviderTypeEnum.CustomOpenAI,
         baseUrl: '',
-        modelNames: [],
+        modelNames: [], // Custom providers use modelNames
         createdAt: Date.now(),
       };
   }
@@ -123,20 +154,53 @@ export function getDefaultAgentModelParams(providerId: string, agentName: AgentN
 
 // Helper function to ensure backward compatibility for provider configs
 function ensureBackwardCompatibility(providerId: string, config: ProviderConfig): ProviderConfig {
+  // Log input config
+  // console.log(`[ensureBackwardCompatibility] Input for ${providerId}:`, JSON.stringify(config));
+
   const updatedConfig = { ...config };
+
+  // Ensure name exists
   if (!updatedConfig.name) {
     updatedConfig.name = getDefaultDisplayNameFromProviderId(providerId);
   }
+  // Ensure type exists
   if (!updatedConfig.type) {
     updatedConfig.type = getProviderTypeByProviderId(providerId);
   }
-  if (!updatedConfig.modelNames) {
-    updatedConfig.modelNames = llmProviderModelNames[providerId as keyof typeof llmProviderModelNames] || [];
+
+  // Handle Azure specifics
+  if (updatedConfig.type === ProviderTypeEnum.AzureOpenAI) {
+    // Ensure Azure fields exist, provide defaults if missing
+    if (updatedConfig.azureApiVersion === undefined) {
+      // console.log(`[ensureBackwardCompatibility] Adding default azureApiVersion for ${providerId}`);
+      updatedConfig.azureApiVersion = '2024-02-15-preview';
+    }
+
+    // Initialize azureDeploymentNames array if it doesn't exist yet
+    if (!updatedConfig.azureDeploymentNames) {
+      updatedConfig.azureDeploymentNames = [];
+    }
+
+    // CRITICAL: Delete modelNames if it exists for Azure type to clean up old configs
+    if (Object.prototype.hasOwnProperty.call(updatedConfig, 'modelNames')) {
+      // console.log(`[ensureBackwardCompatibility] Deleting modelNames for Azure config ${providerId}`);
+      delete updatedConfig.modelNames;
+    }
+  } else {
+    // Ensure modelNames exists ONLY for non-Azure types
+    if (!updatedConfig.modelNames) {
+      // console.log(`[ensureBackwardCompatibility] Adding default modelNames for non-Azure ${providerId}`);
+      updatedConfig.modelNames = llmProviderModelNames[providerId as keyof typeof llmProviderModelNames] || [];
+    }
   }
+
+  // Ensure createdAt exists
   if (!updatedConfig.createdAt) {
-    // if createdAt is not set, set it to "03/04/2025" for backward compatibility
     updatedConfig.createdAt = new Date('03/04/2025').getTime();
   }
+
+  // Log output config
+  // console.log(`[ensureBackwardCompatibility] Output for ${providerId}:`, JSON.stringify(updatedConfig));
   return updatedConfig;
 }
 
@@ -151,19 +215,51 @@ export const llmProviderStore: LLMProviderStorage = {
       throw new Error('API key must be provided (can be empty for local models)');
     }
 
-    if (!config.modelNames) {
-      throw new Error('Model names must be provided');
+    const providerType = config.type || getProviderTypeByProviderId(providerId);
+
+    if (providerType === ProviderTypeEnum.AzureOpenAI) {
+      if (!config.baseUrl?.trim()) {
+        throw new Error('Azure Endpoint (baseUrl) is required');
+      }
+      if (!config.azureDeploymentNames || config.azureDeploymentNames.length === 0) {
+        throw new Error('At least one Azure Deployment Name is required');
+      }
+      if (!config.azureApiVersion?.trim()) {
+        throw new Error('Azure API Version is required');
+      }
+      if (!config.apiKey?.trim()) {
+        throw new Error('API Key is required for Azure OpenAI');
+      }
+    } else if (providerType !== ProviderTypeEnum.CustomOpenAI && providerType !== ProviderTypeEnum.Ollama) {
+      if (!config.apiKey?.trim()) {
+        throw new Error(`API Key is required for ${getDefaultDisplayNameFromProviderId(providerId)}`);
+      }
+    }
+
+    if (providerType !== ProviderTypeEnum.AzureOpenAI) {
+      if (!config.modelNames || config.modelNames.length === 0) {
+        console.warn(`Provider ${providerId} of type ${providerType} is being saved without model names.`);
+      }
     }
 
-    // Ensure backward compatibility by filling in missing fields
     const completeConfig: ProviderConfig = {
-      ...config,
+      apiKey: config.apiKey || '',
+      baseUrl: config.baseUrl,
       name: config.name || getDefaultDisplayNameFromProviderId(providerId),
-      type: config.type || getProviderTypeByProviderId(providerId),
-      modelNames: config.modelNames,
+      type: providerType,
       createdAt: config.createdAt || Date.now(),
+      ...(providerType === ProviderTypeEnum.AzureOpenAI
+        ? {
+            azureDeploymentNames: config.azureDeploymentNames || [],
+            azureApiVersion: config.azureApiVersion,
+          }
+        : {
+            modelNames: config.modelNames || [],
+          }),
     };
 
+    // Log the saved config with the API key redacted
+    console.log(`[llmProviderStore.setProvider] Saving config for ${providerId}:`, JSON.stringify({ ...completeConfig, apiKey: completeConfig.apiKey ? '***' : '' }));
+
     const current = (await storage.get()) || { providers: {} };
     await storage.set({
       providers: {

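To illustrate the custom-ID resolution (a sketch; assumes getProviderTypeByProviderId and ProviderTypeEnum are re-exported from @extension/storage like the other storage helpers):

import { getProviderTypeByProviderId, ProviderTypeEnum } from '@extension/storage';

// Both the built-in ID and suffixed custom IDs resolve to AzureOpenAI,
// so createChatModel routes them all to AzureChatOpenAI
console.log(getProviderTypeByProviderId('azure_openai') === ProviderTypeEnum.AzureOpenAI); // true
console.log(getProviderTypeByProviderId('azure_openai_2') === ProviderTypeEnum.AzureOpenAI); // true
console.log(getProviderTypeByProviderId('my_local_llm') === ProviderTypeEnum.CustomOpenAI); // true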
+ 42 - 0
packages/storage/lib/settings/types.ts

@@ -15,6 +15,8 @@ export enum ProviderTypeEnum {
   Gemini = 'gemini',
   Grok = 'grok',
   Ollama = 'ollama',
+  AzureOpenAI = 'azure_openai',
+  OpenRouter = 'openrouter',
   CustomOpenAI = 'custom_openai',
 }
 
@@ -31,6 +33,18 @@ export const llmProviderModelNames = {
   ],
   [ProviderTypeEnum.Grok]: ['grok-2', 'grok-2-vision'],
   [ProviderTypeEnum.Ollama]: [],
+  [ProviderTypeEnum.AzureOpenAI]: ['gpt-4o', 'gpt-4o-mini', 'o3-mini', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4.1-nano'],
+  [ProviderTypeEnum.OpenRouter]: [
+    'openai/gpt-4o-2024-11-20',
+    'openai/gpt-4.1',
+    'openai/gpt-4.1-mini',
+    'openai/gpt-4.1-nano',
+    'openai/o4-mini',
+    'anthropic/claude-3.5-sonnet',
+    'anthropic/claude-3.7-sonnet',
+    'google/gemini-2.0-flash-001',
+    'deepseek/deepseek-chat-v3-0324:free',
+  ],
   // Custom OpenAI providers don't have predefined models as they are user-defined
 };
 
@@ -106,4 +120,32 @@ export const llmProviderParameters = {
       topP: 0.001,
     },
   },
+  [ProviderTypeEnum.AzureOpenAI]: {
+    [AgentNameEnum.Planner]: {
+      temperature: 0.01,
+      topP: 0.001,
+    },
+    [AgentNameEnum.Navigator]: {
+      temperature: 0,
+      topP: 0.001,
+    },
+    [AgentNameEnum.Validator]: {
+      temperature: 0,
+      topP: 0.001,
+    },
+  },
+  [ProviderTypeEnum.OpenRouter]: {
+    [AgentNameEnum.Planner]: {
+      temperature: 0.01,
+      topP: 0.001,
+    },
+    [AgentNameEnum.Navigator]: {
+      temperature: 0,
+      topP: 0.001,
+    },
+    [AgentNameEnum.Validator]: {
+      temperature: 0,
+      topP: 0.001,
+    },
+  },
 };

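A short sketch of looking up these per-agent defaults (assumes llmProviderParameters and both enums are re-exported from @extension/storage):

import { llmProviderParameters, ProviderTypeEnum, AgentNameEnum } from '@extension/storage';

// Default sampling parameters for the OpenRouter planner agent
const params = llmProviderParameters[ProviderTypeEnum.OpenRouter][AgentNameEnum.Planner];
console.log(params); // { temperature: 0.01, topP: 0.001 }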
File diff suppressed because it is too large
+ 698 - 286
pages/options/src/components/ModelSettings.tsx


+ 29 - 29
pnpm-lock.yaml

@@ -118,25 +118,25 @@ importers:
         version: link:../packages/storage
       '@langchain/anthropic':
         specifier: ^0.3.12
-        version: 0.3.12(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))
+        version: 0.3.12(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))
       '@langchain/core':
         specifier: ^0.3.37
-        version: 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+        version: 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
       '@langchain/deepseek':
         specifier: ^0.0.1
-        version: 0.0.1(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
+        version: 0.0.1(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
       '@langchain/google-genai':
         specifier: 0.1.11
-        version: 0.1.11(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(zod@3.24.1)
+        version: 0.1.11(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(zod@3.24.1)
       '@langchain/ollama':
         specifier: ^0.2.0
-        version: 0.2.0(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))
+        version: 0.2.0(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))
       '@langchain/openai':
         specifier: ^0.4.2
-        version: 0.4.2(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
+        version: 0.4.2(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
       '@langchain/xai':
         specifier: ^0.0.2
-        version: 0.0.2(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
+        version: 0.0.2(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
       puppeteer-core:
         specifier: 24.1.1
         version: 24.1.1
@@ -619,8 +619,8 @@ packages:
     peerDependencies:
       '@langchain/core': '>=0.2.21 <0.4.0'
 
-  '@langchain/core@0.3.37':
-    resolution: {integrity: sha512-LFk9GqHxcyCFx0oXvCBP7vDZIOUHYzzNU7JR+2ofIMnfkBLzcCKzBLySQDfPtd13PrpGHkaeOeLq8H1Tqi9lSw==}
+  '@langchain/core@0.3.44':
+    resolution: {integrity: sha512-3BsSFf7STvPPZyl2kMANgtVnCUvDdyP4k+koP+nY2Tczd5V+RFkuazIn/JOj/xxy/neZjr4PxFU4BFyF1aKXOA==}
     engines: {node: '>=18'}
 
   '@langchain/deepseek@0.0.1':
@@ -3465,17 +3465,17 @@ snapshots:
       '@jridgewell/resolve-uri': 3.1.2
       '@jridgewell/sourcemap-codec': 1.5.0
 
-  '@langchain/anthropic@0.3.12(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))':
+  '@langchain/anthropic@0.3.12(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))':
     dependencies:
       '@anthropic-ai/sdk': 0.32.1
-      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/core': 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
       fast-xml-parser: 4.5.0
       zod: 3.24.1
       zod-to-json-schema: 3.24.4(zod@3.24.1)
     transitivePeerDependencies:
       - encoding
 
-  '@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))':
+  '@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))':
     dependencies:
       '@cfworker/json-schema': 4.1.1
       ansi-styles: 5.2.0
@@ -3487,39 +3487,39 @@ snapshots:
       p-queue: 6.6.2
       p-retry: 4.6.2
       uuid: 10.0.0
-      zod: 3.24.1
-      zod-to-json-schema: 3.24.4(zod@3.24.1)
+      zod: 3.24.2
+      zod-to-json-schema: 3.24.4(zod@3.24.2)
     transitivePeerDependencies:
       - openai
 
-  '@langchain/deepseek@0.0.1(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
+  '@langchain/deepseek@0.0.1(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
     dependencies:
-      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
-      '@langchain/openai': 0.4.4(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
+      '@langchain/core': 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/openai': 0.4.4(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
       zod: 3.24.2
     transitivePeerDependencies:
       - encoding
       - ws
 
-  '@langchain/google-genai@0.1.11(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(zod@3.24.1)':
+  '@langchain/google-genai@0.1.11(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(zod@3.24.1)':
     dependencies:
       '@google/generative-ai': 0.21.0
-      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/core': 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
       zod-to-json-schema: 3.24.4(zod@3.24.1)
     transitivePeerDependencies:
       - zod
 
-  '@langchain/ollama@0.2.0(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))':
+  '@langchain/ollama@0.2.0(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))':
     dependencies:
-      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/core': 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
       ollama: 0.5.14
       uuid: 10.0.0
       zod: 3.24.1
       zod-to-json-schema: 3.24.4(zod@3.24.1)
 
-  '@langchain/openai@0.4.2(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
+  '@langchain/openai@0.4.2(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
     dependencies:
-      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/core': 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
       js-tiktoken: 1.0.17
       openai: 4.82.0(ws@8.18.0)(zod@3.24.1)
       zod: 3.24.1
@@ -3528,9 +3528,9 @@ snapshots:
       - encoding
       - ws
 
-  '@langchain/openai@0.4.4(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
+  '@langchain/openai@0.4.4(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
     dependencies:
-      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/core': 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
       js-tiktoken: 1.0.17
       openai: 4.82.0(ws@8.18.0)(zod@3.24.2)
       zod: 3.24.2
@@ -3539,10 +3539,10 @@ snapshots:
       - encoding
       - ws
 
-  '@langchain/xai@0.0.2(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
+  '@langchain/xai@0.0.2(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
     dependencies:
-      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
-      '@langchain/openai': 0.4.4(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
+      '@langchain/core': 0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/openai': 0.4.4(@langchain/core@0.3.44(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
       zod: 3.24.2
     transitivePeerDependencies:
       - encoding
@@ -4086,7 +4086,7 @@ snapshots:
 
   ast-types@0.13.4:
     dependencies:
-      tslib: 2.7.0
+      tslib: 2.8.1
 
   asynckit@0.4.0: {}
 

Some files were not shown because too many files changed in this diff