```yaml
llm:
  provider: gpt4all
  model: 'orca-mini-3b.ggmlv3.q4_0.bin'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

embedder:
  provider: gpt4all
  config:
    model: 'all-MiniLM-L6-v2'
```
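The original does not show which framework reads this file, so the sketch below is only an illustration of how the values could be wired into the GPT4All Python bindings. It assumes the YAML is saved as `config.yaml` (a hypothetical file name) and that the `pyyaml` and `gpt4all` packages are installed; the prompt text and the loading code are illustrative, not part of the configuration itself.

```python
# Sketch only: assumes the YAML above is saved as config.yaml and that the
# pyyaml and gpt4all packages are installed. The framework that actually
# consumes this config is not specified in the original, so this wiring is
# purely illustrative.
import yaml
from gpt4all import GPT4All

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

llm_cfg = cfg["llm"]
gen_cfg = llm_cfg["config"]

# Load the local GGML model named in the config.
model = GPT4All(llm_cfg["model"])

# Forward the sampling settings from the YAML to gpt4all's generate().
reply = model.generate(
    "What is retrieval-augmented generation?",  # example prompt (assumption)
    temp=gen_cfg["temperature"],
    max_tokens=gen_cfg["max_tokens"],
    top_p=gen_cfg["top_p"],
    streaming=gen_cfg["stream"],
)
print(reply)
```

With `stream: false`, `generate()` returns the full completion as a single string; setting it to `true` (and `streaming=True` above) would instead yield tokens incrementally.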