add built-in deepseek official api support

alexchenzl 5 months ago
parent
commit
1935e0fd83

+ 3 - 1
chrome-extension/package.json

@@ -20,13 +20,15 @@
     "@extension/storage": "workspace:*",
     "@langchain/anthropic": "^0.3.12",
     "@langchain/core": "^0.3.37",
+    "@langchain/deepseek": "^0.0.1",
     "@langchain/google-genai": "0.1.11",
     "@langchain/ollama": "^0.2.0",
     "@langchain/openai": "^0.4.2",
     "@langchain/xai": "^0.0.2",
     "puppeteer-core": "24.1.1",
     "webextension-polyfill": "^0.12.0",
-    "zod": "^3.24.1"
+    "zod": "^3.24.1",
+    "zod-to-json-schema": "^3.24.4"
   },
   "overrides": {
     "@langchain/core": "0.3.0"

+ 49 - 12
chrome-extension/src/background/agent/agents/base.ts

@@ -2,9 +2,10 @@ import type { z } from 'zod';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { AgentContext, AgentOutput } from '../types';
 import type { BasePrompt } from '../prompts/base';
-import { type BaseMessage, AIMessage, ToolMessage } from '@langchain/core/messages';
+import { type BaseMessage, AIMessage, ToolMessage, HumanMessage } from '@langchain/core/messages';
 import { createLogger } from '@src/background/log';
 import type { Action } from '../actions/builder';
+import { convertMessagesForNonFunctionCallingModels, mergeSuccessiveMessages } from '../messages/service';
 
 const logger = createLogger('agent');
 
@@ -107,6 +108,27 @@ export abstract class BaseAgent<T extends z.ZodType, M = unknown> {
     return text.replace(THINK_TAGS, '');
   }
 
+  /**
+   * Convert input messages to a format that is compatible with the model
+   * @param inputMessages - The input messages to convert
+   * @param modelName - The optional model name to determine conversion strategy
+   * @returns The converted input messages
+   */
+  protected convertInputMessages(inputMessages: BaseMessage[], modelName?: string): BaseMessage[] {
+    if (!modelName) {
+      return inputMessages;
+    }
+
+    if (modelName === 'deepseek-reasoner' || modelName.startsWith('deepseek-r1')) {
+      const convertedInputMessages = convertMessagesForNonFunctionCallingModels(inputMessages);
+      let mergedInputMessages = mergeSuccessiveMessages(convertedInputMessages, HumanMessage);
+      mergedInputMessages = mergeSuccessiveMessages(mergedInputMessages, AIMessage);
+      return mergedInputMessages;
+    }
+
+    return inputMessages;
+  }
+
   async invoke(inputMessages: BaseMessage[]): Promise<this['ModelOutput']> {
     // Use structured output
     if (this.withStructuredOutput) {
@@ -115,17 +137,25 @@ export abstract class BaseAgent<T extends z.ZodType, M = unknown> {
         name: this.modelOutputToolName,
       });
 
-      const response = await structuredLlm.invoke(inputMessages, {
-        ...this.callOptions,
-      });
-      if (response.parsed) {
-        return response.parsed;
+      try {
+        const response = await structuredLlm.invoke(inputMessages, {
+          ...this.callOptions,
+        });
+
+        if (response.parsed) {
+          return response.parsed;
+        }
+        logger.error('Failed to parse response', response);
+        throw new Error('Could not parse response with structured output');
+      } catch (error) {
+        const errorMessage = `Failed to invoke ${this.modelName} with structured output: ${error}`;
+        throw new Error(errorMessage);
       }
-      throw new Error('Could not parse response');
     }
 
     // Without structured output support, need to extract JSON from model output manually
-    const response = await this.chatLLM.invoke(inputMessages, {
+    const convertedInputMessages = this.convertInputMessages(inputMessages, this.modelName);
+    const response = await this.chatLLM.invoke(convertedInputMessages, {
       ...this.callOptions,
     });
     if (typeof response.content === 'string') {
@@ -137,10 +167,12 @@ export abstract class BaseAgent<T extends z.ZodType, M = unknown> {
           return parsed;
         }
       } catch (error) {
-        logger.error('Could not parse response', response);
-        throw new Error('Could not parse response');
+        const errorMessage = `Failed to extract JSON from response: ${error}`;
+        throw new Error(errorMessage);
       }
     }
+    const errorMessage = `Failed to parse response: ${response}`;
+    logger.error(errorMessage);
     throw new Error('Could not parse response');
   }
 
@@ -150,7 +182,12 @@ export abstract class BaseAgent<T extends z.ZodType, M = unknown> {
   // Helper method to validate metadata
   protected validateModelOutput(data: unknown): this['ModelOutput'] | undefined {
     if (!this.modelOutputSchema || !data) return undefined;
-    return this.modelOutputSchema.parse(data);
+    try {
+      return this.modelOutputSchema.parse(data);
+    } catch (error) {
+      logger.error('validateModelOutput', error);
+      throw new Error('Could not validate model output');
+    }
   }
 
   // Add the model output to the memory
@@ -202,7 +239,7 @@ export abstract class BaseAgent<T extends z.ZodType, M = unknown> {
       return JSON.parse(cleanedContent);
     } catch (e) {
       logger.warning(`Failed to parse model output: ${content} ${e}`);
-      throw new Error('Could not parse response.');
+      throw new Error('Failed to extract JSON from model output.');
     }
   }
 }
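
For reasoning models without structured-output support (the deepseek-r1 family), the fallback path strips <think> tags and extracts JSON by hand. A minimal sketch of that pipeline in isolation, using an illustrative raw completion (the actual THINK_TAGS regex in base.ts may differ from the one assumed here):

// Illustrative raw output from a reasoning model: a <think> block followed by JSON.
const raw = `<think>The task is complete, so I should emit a done action.</think>
{"current_state": {"evaluation": "ok"}, "action": [{"done": {"text": "finished"}}]}`;

// Mirrors removeThinkTags: drop the reasoning block (regex is an assumption).
const withoutThink = raw.replace(/<think>[\s\S]*?<\/think>/g, '').trim();

// Mirrors extractJsonFromModelOutput: parse what remains as JSON.
const parsed = JSON.parse(withoutThink);
console.log(parsed.action[0]); // { done: { text: 'finished' } }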

+ 35 - 23
chrome-extension/src/background/agent/agents/navigator.ts

@@ -61,6 +61,8 @@ export class NavigatorAgent extends BaseAgent<z.ZodType, NavigatorResult> {
     this.actionRegistry = actionRegistry;
 
     this.jsonSchema = this.modelName.startsWith('gemini') ? geminiNavigatorOutputSchema : jsonNavigatorOutputSchema;
+
+    // logger.info('Navigator zod schema', JSON.stringify(zodToJsonSchema(this.modelOutputSchema), null, 2));
   }
 
   async invoke(inputMessages: BaseMessage[]): Promise<this['ModelOutput']> {
@@ -68,36 +70,46 @@ export class NavigatorAgent extends BaseAgent<z.ZodType, NavigatorResult> {
     if (this.withStructuredOutput) {
       const structuredLlm = this.chatLLM.withStructuredOutput(this.jsonSchema, {
         includeRaw: true,
+        name: this.modelOutputToolName,
       });
 
-      const response = await structuredLlm.invoke(inputMessages, {
-        ...this.callOptions,
-      });
-
-      if (response.parsed) {
-        return response.parsed;
-      }
-      throw new Error('Could not parse response');
-    }
-
-    // Without structured output support, need to extract JSON from model output manually
-    const response = await this.chatLLM.invoke(inputMessages, {
-      ...this.callOptions,
-    });
-    if (typeof response.content === 'string') {
-      response.content = this.removeThinkTags(response.content);
+      let response = undefined;
       try {
-        const extractedJson = this.extractJsonFromModelOutput(response.content);
-        const parsed = this.validateModelOutput(extractedJson);
-        if (parsed) {
-          return parsed;
+        response = await structuredLlm.invoke(inputMessages, {
+          ...this.callOptions,
+        });
+
+        if (response.parsed) {
+          return response.parsed;
         }
       } catch (error) {
-        logger.error('Could not parse response', response);
-        throw new Error('Could not parse response');
+        const errorMessage = `Failed to invoke ${this.modelName} with structured output: ${error}`;
+        throw new Error(errorMessage);
       }
+
+      // Use type assertion to access the properties
+      const rawResponse = response.raw as BaseMessage & {
+        tool_calls?: Array<{
+          args: {
+            currentState: typeof agentBrainSchema._type;
+            action: z.infer<ReturnType<typeof buildDynamicActionSchema>>;
+          };
+        }>;
+      };
+
+      // sometimes the LLM returns empty content but with one or more tool calls, so check the tool calls
+      if (rawResponse.tool_calls && rawResponse.tool_calls.length > 0) {
+        logger.info('Navigator structuredLlm tool call with empty content', rawResponse.tool_calls);
+        // only use the first tool call
+        const toolCall = rawResponse.tool_calls[0];
+        return {
+          current_state: toolCall.args.currentState,
+          action: [...toolCall.args.action],
+        };
+      }
+      throw new Error('Could not parse response');
     }
-    throw new Error('Could not parse response');
+    throw new Error('Navigator needs to work with LLM that supports tool calling');
   }
 
   async execute(): Promise<AgentOutput<NavigatorResult>> {
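
When response.parsed comes back empty, the new fallback reads the raw tool calls directly. A sketch of the shape being handled, with made-up values (the field names follow the type assertion in the hunk above):

// The raw response can carry an empty content string plus tool calls.
interface RawToolCall {
  args: {
    currentState: Record<string, unknown>; // agent brain state
    action: Array<Record<string, unknown>>; // dynamic action list
  };
}

const rawResponse = {
  content: '',
  tool_calls: [
    { args: { currentState: { evaluation: 'ok' }, action: [{ click_element: { index: 3 } }] } },
  ] as RawToolCall[],
};

// Mirror the fallback: only the first tool call is used to rebuild the output.
if (rawResponse.tool_calls.length > 0) {
  const toolCall = rawResponse.tool_calls[0];
  console.log({
    current_state: toolCall.args.currentState,
    action: [...toolCall.args.action],
  });
}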

+ 10 - 3
chrome-extension/src/background/agent/helper.ts

@@ -5,6 +5,7 @@ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
 import { ChatXAI } from '@langchain/xai';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { ChatOllama } from '@langchain/ollama';
+import { ChatDeepSeek } from '@langchain/deepseek';
 
 const maxTokens = 1024 * 4;
 
@@ -54,9 +55,6 @@ export function createChatModel(providerConfig: ProviderConfig, modelConfig: Mod
   const temperature = (modelConfig.parameters?.temperature ?? 0.1) as number;
   const topP = (modelConfig.parameters?.topP ?? 0.1) as number;
 
-  console.log('providerConfig', providerConfig);
-  console.log('modelConfig', modelConfig);
-
   switch (modelConfig.provider) {
     case ProviderTypeEnum.OpenAI: {
       return createOpenAIChatModel(providerConfig, modelConfig);
@@ -72,6 +70,15 @@ export function createChatModel(providerConfig: ProviderConfig, modelConfig: Mod
       };
       return new ChatAnthropic(args);
     }
+    case ProviderTypeEnum.DeepSeek: {
+      const args = {
+        model: modelConfig.modelName,
+        apiKey: providerConfig.apiKey,
+        temperature,
+        topP,
+      };
+      return new ChatDeepSeek(args) as BaseChatModel;
+    }
     case ProviderTypeEnum.Gemini: {
       const args = {
         model: modelConfig.modelName,
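
With the new case in place, a DeepSeek model config is routed to ChatDeepSeek. A standalone sketch of the construction (option names mirror the branch above; the API key is a placeholder, supplied by provider settings in the extension):

import { ChatDeepSeek } from '@langchain/deepseek';

// Same arguments as the new DeepSeek branch in createChatModel.
const llm = new ChatDeepSeek({
  model: 'deepseek-chat',
  apiKey: process.env.DEEPSEEK_API_KEY, // placeholder key for this sketch
  temperature: 0.1,
  topP: 0.1,
});

// const reply = await llm.invoke('Reply with one word.');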

+ 65 - 65
chrome-extension/src/background/agent/messages/service.ts

@@ -325,80 +325,80 @@ export default class MessageManager {
       `Added message with ${finalMsg.metadata.inputTokens} tokens - total tokens now: ${this.history.totalTokens}/${this.maxInputTokens} - total messages: ${this.history.messages.length}`,
     );
   }
+}
 
-  /**
-   * Converts messages for non-function-calling models
-   * @param inputMessages - The BaseMessage objects to convert
-   * @returns The converted BaseMessage objects
-   */
-  public convertMessagesForNonFunctionCallingModels(inputMessages: BaseMessage[]): BaseMessage[] {
-    return inputMessages.map(message => {
-      if (message instanceof HumanMessage || message instanceof SystemMessage) {
-        return message;
-      }
-      if (message instanceof ToolMessage) {
-        return new HumanMessage({
-          content: `Tool Response: ${message.content}`,
-        });
-      }
-      if (message instanceof AIMessage) {
-        // if it's an AIMessage with tool_calls, convert it to a normal AIMessage
-        if ('tool_calls' in message && message.tool_calls) {
-          const toolCallsStr = message.tool_calls
-            .map(tc => {
-              if (
-                'function' in tc &&
-                typeof tc.function === 'object' &&
-                tc.function &&
-                'name' in tc.function &&
-                'arguments' in tc.function
-              ) {
-                // For Groq, we need to format function calls differently
-                return `Function: ${tc.function.name}\nArguments: ${JSON.stringify(tc.function.arguments)}`;
-              }
-              return `Tool Call: ${JSON.stringify(tc)}`;
-            })
-            .join('\n');
-          return new AIMessage({ content: toolCallsStr });
-        }
-        return message;
+/**
+ * Converts messages for non-function-calling models
+ * @param inputMessages - The BaseMessage objects to convert
+ * @returns The converted BaseMessage objects
+ */
+export function convertMessagesForNonFunctionCallingModels(inputMessages: BaseMessage[]): BaseMessage[] {
+  return inputMessages.map(message => {
+    if (message instanceof HumanMessage || message instanceof SystemMessage) {
+      return message;
+    }
+    if (message instanceof ToolMessage) {
+      return new HumanMessage({
+        content: `Tool Response: ${message.content}`,
+      });
+    }
+    if (message instanceof AIMessage) {
+      // if it's an AIMessage with tool_calls, convert it to a normal AIMessage
+      if ('tool_calls' in message && message.tool_calls) {
+        const toolCallsStr = message.tool_calls
+          .map(tc => {
+            if (
+              'function' in tc &&
+              typeof tc.function === 'object' &&
+              tc.function &&
+              'name' in tc.function &&
+              'arguments' in tc.function
+            ) {
+              // For Groq, we need to format function calls differently
+              return `Function: ${tc.function.name}\nArguments: ${JSON.stringify(tc.function.arguments)}`;
+            }
+            return `Tool Call: ${JSON.stringify(tc)}`;
+          })
+          .join('\n');
+        return new AIMessage({ content: toolCallsStr });
       }
-      throw new Error(`Unknown message type: ${message.constructor.name}`);
-    });
-  }
+      return message;
+    }
+    throw new Error(`Unknown message type: ${message.constructor.name}`);
+  });
+}
 
-  /**
-   * Some models like deepseek-reasoner dont allow multiple human messages in a row. This function merges them into one."
-   * @param messages - The BaseMessage objects to merge
-   * @param classToMerge - The class of the messages to merge
-   * @returns The merged BaseMessage objects
-   */
-  public mergeSuccessiveMessages(messages: BaseMessage[], classToMerge: typeof BaseMessage): BaseMessage[] {
-    const mergedMessages: BaseMessage[] = [];
-    let streak = 0;
-
-    for (const message of messages) {
-      if (message instanceof classToMerge) {
-        streak += 1;
-        if (streak > 1) {
-          const lastMessage = mergedMessages[mergedMessages.length - 1];
-          if (Array.isArray(message.content)) {
-            const firstContent = message.content[0];
-            if ('text' in firstContent) {
-              lastMessage.content += firstContent.text;
-            }
-          } else {
-            lastMessage.content += message.content;
+/**
+ * Some models, like deepseek-reasoner, don't allow multiple human messages in a row. This function merges them into one.
+ * @param messages - The BaseMessage objects to merge
+ * @param classToMerge - The class of the messages to merge
+ * @returns The merged BaseMessage objects
+ */
+export function mergeSuccessiveMessages(messages: BaseMessage[], classToMerge: typeof BaseMessage): BaseMessage[] {
+  const mergedMessages: BaseMessage[] = [];
+  let streak = 0;
+
+  for (const message of messages) {
+    if (message instanceof classToMerge) {
+      streak += 1;
+      if (streak > 1) {
+        const lastMessage = mergedMessages[mergedMessages.length - 1];
+        if (Array.isArray(message.content)) {
+          const firstContent = message.content[0];
+          if ('text' in firstContent) {
+            lastMessage.content += firstContent.text;
           }
         } else {
-          mergedMessages.push(message);
+          lastMessage.content += message.content;
         }
       } else {
         mergedMessages.push(message);
-        streak = 0;
       }
+    } else {
+      mergedMessages.push(message);
+      streak = 0;
     }
-
-    return mergedMessages;
   }
+
+  return mergedMessages;
 }
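
A worked example of the two helpers, now exported at module level so BaseAgent can import them (import paths per the diff; message classes from @langchain/core):

import { HumanMessage, ToolMessage } from '@langchain/core/messages';
import { convertMessagesForNonFunctionCallingModels, mergeSuccessiveMessages } from './service';

const history = [
  new HumanMessage('Open the page'),
  new ToolMessage({ content: '{"ok": true}', tool_call_id: '1' }),
];

// The ToolMessage becomes a HumanMessage ("Tool Response: ..."), leaving two
// human messages in a row, which deepseek-reasoner rejects...
const converted = convertMessagesForNonFunctionCallingModels(history);

// ...so successive HumanMessages are merged into a single message.
const merged = mergeSuccessiveMessages(converted, HumanMessage);
console.log(merged.length); // 1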

+ 6 - 6
chrome-extension/src/background/agent/prompts/planner.ts

@@ -34,12 +34,12 @@ RESPONSIBILITIES:
 
 RESPONSE FORMAT: You must always respond with a valid JSON object with the following fields:
 {
-    "observation": "Brief analysis of the current state and what has been done so far",
-    "done": "true or false [boolean type], whether further steps are needed to complete the ultimate task",
-    "challenges": "List any potential challenges or roadblocks",
-    "next_steps": "List 2-3 high-level next steps to take, each step should start with a new line",
-    "reasoning": "Explain your reasoning for the suggested next steps",
-    "web_task": "true or false [boolean type], whether the ultimate task is related to browsing the web"
+    "observation": "[string type], brief analysis of the current state and what has been done so far",
+    "done": "[boolean type], whether further steps are needed to complete the ultimate task",
+    "challenges": "[string type], list any potential challenges or roadblocks",
+    "next_steps": "[string type], list 2-3 high-level next steps to take, each step should start with a new line",
+    "reasoning": "[string type], explain your reasoning for the suggested next steps",
+    "web_task": "[boolean type], whether the ultimate task is related to browsing the web"
 }
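
Each field now carries an explicit type tag, nudging JSON-mode models to emit booleans as booleans rather than strings. A hypothetical zod schema that would accept this response format (an illustration only; the extension's actual planner schema may differ):

import { z } from 'zod';

// Hypothetical schema mirroring the typed fields in the prompt above.
const plannerOutputSchema = z.object({
  observation: z.string(),
  done: z.boolean(),
  challenges: z.string(),
  next_steps: z.string(),
  reasoning: z.string(),
  web_task: z.boolean(),
});

plannerOutputSchema.parse({
  observation: 'On the search page; nothing submitted yet',
  done: false,
  challenges: 'None so far',
  next_steps: '1. Enter the query\n2. Submit the form',
  reasoning: 'The query still needs to be entered and submitted',
  web_task: true,
});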
 
 NOTE:

+ 4 - 0
packages/storage/lib/settings/llmProviders.ts

@@ -44,6 +44,7 @@ export function getProviderTypeByProviderId(providerId: string): ProviderTypeEnu
   switch (providerId) {
     case ProviderTypeEnum.OpenAI:
     case ProviderTypeEnum.Anthropic:
+    case ProviderTypeEnum.DeepSeek:
     case ProviderTypeEnum.Gemini:
     case ProviderTypeEnum.Grok:
     case ProviderTypeEnum.Ollama:
@@ -61,6 +62,8 @@ export function getDefaultDisplayNameFromProviderId(providerId: string): string
       return 'OpenAI';
     case ProviderTypeEnum.Anthropic:
       return 'Anthropic';
+    case ProviderTypeEnum.DeepSeek:
+      return 'DeepSeek';
     case ProviderTypeEnum.Gemini:
       return 'Gemini';
     case ProviderTypeEnum.Grok:
@@ -78,6 +81,7 @@ export function getDefaultProviderConfig(providerId: string): ProviderConfig {
   switch (providerId) {
     case ProviderTypeEnum.OpenAI:
     case ProviderTypeEnum.Anthropic:
+    case ProviderTypeEnum.DeepSeek:
     case ProviderTypeEnum.Gemini:
     case ProviderTypeEnum.Grok:
       return {
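
DeepSeek reuses the plain API-key provider config shared by OpenAI, Anthropic, Gemini, and Grok (the hunk body is truncated above). A quick usage sketch of the updated helpers (the import path is an assumption):

import {
  getDefaultDisplayNameFromProviderId,
  getProviderTypeByProviderId,
  ProviderTypeEnum,
} from '@extension/storage'; // assumed re-export path for packages/storage

console.log(getProviderTypeByProviderId('deepseek') === ProviderTypeEnum.DeepSeek); // true
console.log(getDefaultDisplayNameFromProviderId('deepseek')); // 'DeepSeek'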

+ 2 - 0
packages/storage/lib/settings/types.ts

@@ -11,6 +11,7 @@ export enum AgentNameEnum {
 export enum ProviderTypeEnum {
   OpenAI = 'openai',
   Anthropic = 'anthropic',
+  DeepSeek = 'deepseek',
   Gemini = 'gemini',
   Grok = 'grok',
   Ollama = 'ollama',
@@ -21,6 +22,7 @@ export enum ProviderTypeEnum {
 export const llmProviderModelNames = {
   [ProviderTypeEnum.OpenAI]: ['gpt-4o', 'gpt-4o-mini', 'o1', 'o3-mini'],
   [ProviderTypeEnum.Anthropic]: ['claude-3-7-sonnet-latest', 'claude-3-5-sonnet-latest', 'claude-3-5-haiku-latest'],
+  [ProviderTypeEnum.DeepSeek]: ['deepseek-chat', 'deepseek-reasoner'],
   [ProviderTypeEnum.Gemini]: [
     'gemini-2.0-flash',
     'gemini-2.0-flash-lite',
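
The registered model list presumably drives the default choices in the options UI, as in this sketch (same assumed import path as above):

import { llmProviderModelNames, ProviderTypeEnum } from '@extension/storage'; // assumed path

// Default models registered for the new provider, per the diff above.
console.log(llmProviderModelNames[ProviderTypeEnum.DeepSeek]); // ['deepseek-chat', 'deepseek-reasoner']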

+ 1 - 1
pages/side-panel/src/templates.tsx

@@ -15,6 +15,6 @@ export const defaultTemplates = [
     id: '3',
     title: '📚 Explore AI Papers',
     content:
-      '- Go to https://huggingface.co/papers and click through each of the top 3 upvoted papers.\n- For each paper:\n  - Record the title, URL and upvotes\n  - Summarise the abstract section\n  - Finally, compile together a summary of all 3 papers, ranked by upvotes',
+      '- Go to https://huggingface.co/papers and click through each of the top 3 upvoted papers.\n- For each paper:\n  - Record the title, URL and upvotes\n  - Summarise the abstract section\n- Finally, compile together a summary of all 3 papers, ranked by upvotes',
   },
 ];

+ 31 - 10
pnpm-lock.yaml

@@ -117,6 +117,9 @@ importers:
       '@langchain/core':
         specifier: ^0.3.37
         version: 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/deepseek':
+        specifier: ^0.0.1
+        version: 0.0.1(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
       '@langchain/google-genai':
         specifier: 0.1.11
         version: 0.1.11(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(zod@3.24.1)
@@ -138,6 +141,9 @@ importers:
       zod:
         specifier: ^3.24.1
         version: 3.24.1
+      zod-to-json-schema:
+        specifier: ^3.24.4
+        version: 3.24.4(zod@3.24.1)
     devDependencies:
       '@extension/dev-utils':
         specifier: workspace:*
@@ -750,6 +756,12 @@ packages:
     resolution: {integrity: sha512-LFk9GqHxcyCFx0oXvCBP7vDZIOUHYzzNU7JR+2ofIMnfkBLzcCKzBLySQDfPtd13PrpGHkaeOeLq8H1Tqi9lSw==}
     engines: {node: '>=18'}
 
+  '@langchain/deepseek@0.0.1':
+    resolution: {integrity: sha512-jgrbitvV4p7Kqo/Fyni9coCliNXUrJ2XChdR8eHvQg3RL+w13DIQjJn2mrkCrb7v6Is1rI7It2x3yIbADL71Yg==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      '@langchain/core': '>=0.3.0 <0.4.0'
+
   '@langchain/google-genai@0.1.11':
     resolution: {integrity: sha512-puNFwgW+yY3nI358R4wjG7EsCiCIMH0UaIMGcK/GF44opAlf6pBY9iql4CLVrUto1XBI3EETZuNZ9nOhzpnfgQ==}
     engines: {node: '>=18'}
@@ -3407,8 +3419,8 @@ packages:
     resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
     engines: {node: '>=10'}
 
-  zod-to-json-schema@3.24.1:
-    resolution: {integrity: sha512-3h08nf3Vw3Wl3PK+q3ow/lIil81IT2Oa7YpQyUUDsEWbXveMesdfK1xBd2RhCkynwZndAxixji/7SYJJowr62w==}
+  zod-to-json-schema@3.24.4:
+    resolution: {integrity: sha512-0uNlcvgabyrni9Ag8Vghj21drk7+7tp7VTwwR7KxxXXc/3pbXz2PHlDgj3cICahgF1kHm4dExBFj7BXrZJXzig==}
     peerDependencies:
       zod: ^3.24.1
 
@@ -3663,7 +3675,7 @@ snapshots:
       '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
       fast-xml-parser: 4.5.0
       zod: 3.24.1
-      zod-to-json-schema: 3.24.1(zod@3.24.1)
+      zod-to-json-schema: 3.24.4(zod@3.24.1)
     transitivePeerDependencies:
       - encoding
 
@@ -3680,15 +3692,24 @@ snapshots:
       p-retry: 4.6.2
       uuid: 10.0.0
       zod: 3.24.1
-      zod-to-json-schema: 3.24.1(zod@3.24.1)
+      zod-to-json-schema: 3.24.4(zod@3.24.1)
     transitivePeerDependencies:
       - openai
 
+  '@langchain/deepseek@0.0.1(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
+    dependencies:
+      '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
+      '@langchain/openai': 0.4.4(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)
+      zod: 3.24.2
+    transitivePeerDependencies:
+      - encoding
+      - ws
+
   '@langchain/google-genai@0.1.11(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(zod@3.24.1)':
     dependencies:
       '@google/generative-ai': 0.21.0
       '@langchain/core': 0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1))
-      zod-to-json-schema: 3.24.1(zod@3.24.1)
+      zod-to-json-schema: 3.24.4(zod@3.24.1)
     transitivePeerDependencies:
       - zod
 
@@ -3698,7 +3719,7 @@ snapshots:
       ollama: 0.5.14
       uuid: 10.0.0
       zod: 3.24.1
-      zod-to-json-schema: 3.24.1(zod@3.24.1)
+      zod-to-json-schema: 3.24.4(zod@3.24.1)
 
   '@langchain/openai@0.4.2(@langchain/core@0.3.37(openai@4.82.0(ws@8.18.0)(zod@3.24.1)))(ws@8.18.0)':
     dependencies:
@@ -3706,7 +3727,7 @@ snapshots:
       js-tiktoken: 1.0.17
       openai: 4.82.0(ws@8.18.0)(zod@3.24.1)
       zod: 3.24.1
-      zod-to-json-schema: 3.24.1(zod@3.24.1)
+      zod-to-json-schema: 3.24.4(zod@3.24.1)
     transitivePeerDependencies:
       - encoding
       - ws
@@ -3717,7 +3738,7 @@ snapshots:
       js-tiktoken: 1.0.17
       openai: 4.82.0(ws@8.18.0)(zod@3.24.2)
       zod: 3.24.2
-      zod-to-json-schema: 3.24.1(zod@3.24.2)
+      zod-to-json-schema: 3.24.4(zod@3.24.2)
     transitivePeerDependencies:
       - encoding
       - ws
@@ -6666,11 +6687,11 @@ snapshots:
 
   yocto-queue@0.1.0: {}
 
-  zod-to-json-schema@3.24.1(zod@3.24.1):
+  zod-to-json-schema@3.24.4(zod@3.24.1):
     dependencies:
       zod: 3.24.1
 
-  zod-to-json-schema@3.24.1(zod@3.24.2):
+  zod-to-json-schema@3.24.4(zod@3.24.2):
     dependencies:
       zod: 3.24.2