---
# LLM + embedder configuration (embedchain-style).
# NOTE(review): the original file was collapsed onto one line, which is not
# parseable YAML (plain scalars cannot contain ": "); restored to block style.

llm:
  # Local GPT4All backend — runs fully offline.
  provider: gpt4all
  config:
    # Quantized GGML model file; quoted so the dots are never type-inferred.
    model: 'orca-mini-3b.ggmlv3.q4_0.bin'
    # Sampling temperature: 0.0 = deterministic, higher = more random.
    temperature: 0.5
    # Upper bound on generated tokens per response.
    max_tokens: 1000
    # Nucleus sampling cutoff; 1 disables top-p filtering.
    top_p: 1
    # Return the full completion at once rather than token-by-token.
    stream: false

embedder:
  # Embeddings are produced by the same local GPT4All provider.
  provider: gpt4all