import { type ProviderConfig, type ModelConfig, ProviderTypeEnum } from '@extension/storage';
import { ChatOpenAI } from '@langchain/openai';
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOllama } from '@langchain/ollama';
  7. // create a chat model based on the agent name, the model name and provider
  8. export function createChatModel(providerConfig: ProviderConfig, modelConfig: ModelConfig): BaseChatModel {
  9. const maxTokens = 1024 * 4;
  10. const maxCompletionTokens = 1024 * 4;
  11. const temperature = (modelConfig.parameters?.temperature ?? 0.1) as number;
  12. const topP = (modelConfig.parameters?.topP ?? 0.1) as number;
  13. console.log('modelConfig', modelConfig);
  14. switch (providerConfig.type) {
  15. case ProviderTypeEnum.OpenAI: {
  16. const args: {
  17. model: string;
  18. apiKey: string;
  19. modelKwargs?: { max_completion_tokens: number };
  20. topP?: number;
  21. temperature?: number;
  22. maxTokens?: number;
  23. } = {
  24. model: modelConfig.modelName,
  25. apiKey: providerConfig.apiKey,
  26. };
  27. // O series models have different parameters
  28. if (modelConfig.modelName.startsWith('o')) {
  29. args.modelKwargs = {
  30. max_completion_tokens: maxCompletionTokens,
  31. };
  32. } else {
  33. args.topP = topP;
  34. args.temperature = temperature;
  35. args.maxTokens = maxTokens;
  36. }
  37. return new ChatOpenAI(args);
  38. }
  39. case ProviderTypeEnum.Anthropic: {
  40. const args = {
  41. model: modelConfig.modelName,
  42. apiKey: providerConfig.apiKey,
  43. maxTokens,
  44. temperature,
  45. topP,
  46. clientOptions: {},
  47. };
  48. if (providerConfig.baseUrl) {
  49. args.clientOptions = {
  50. baseURL: providerConfig.baseUrl,
  51. };
  52. }
  53. return new ChatAnthropic(args);
  54. }
  55. case ProviderTypeEnum.Gemini: {
  56. const args = {
  57. model: modelConfig.modelName,
  58. apiKey: providerConfig.apiKey,
  59. temperature,
  60. topP,
  61. };
  62. return new ChatGoogleGenerativeAI(args);
  63. }
  64. case ProviderTypeEnum.Ollama: {
  65. const args: {
  66. model: string;
  67. apiKey?: string;
  68. baseUrl: string;
  69. modelKwargs?: { max_completion_tokens: number };
  70. topP?: number;
  71. temperature?: number;
  72. maxTokens?: number;
  73. options: {
  74. num_ctx: number;
  75. };
  76. } = {
  77. model: modelConfig.modelName,
  78. apiKey: providerConfig.apiKey,
  79. baseUrl: providerConfig.baseUrl ?? 'http://localhost:11434',
  80. topP,
  81. temperature,
  82. maxTokens,
  83. options: {
  84. num_ctx: 128000,
  85. },
  86. };
  87. return new ChatOllama(args);
  88. }
  89. default: {
  90. const args: {
  91. model: string;
  92. apiKey: string;
  93. configuration: Record<string, unknown>;
  94. topP?: number;
  95. temperature?: number;
  96. maxTokens?: number;
  97. } = {
  98. model: modelConfig.modelName,
  99. apiKey: providerConfig.apiKey,
  100. configuration: {
  101. baseURL: providerConfig.baseUrl,
  102. },
  103. topP,
  104. temperature,
  105. maxTokens,
  106. };
  107. return new ChatOpenAI(args);
  108. }
  109. }
  110. }