# llama2.yaml — LLM provider configuration (Llama 2 via Replicate)
---
  1. llm:
  2. provider: llama2
  3. config:
  4. model: 'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5'
  5. temperature: 0.5
  6. max_tokens: 1000
  7. top_p: 0.5
  8. stream: false