---
title: '🚀 Streamlit'
description: 'Integrate with Streamlit to plug and play with any LLM'
---

In this example, we will learn how to use `mistralai/Mixtral-8x7B-Instruct-v0.1` and Embedchain together with Streamlit to build a simple RAG chatbot.

![Streamlit + Embedchain Demo](https://github.com/embedchain/embedchain/assets/73601258/052f7378-797c-41cf-ac81-f004d0d44dd1)

## Setup

Install Embedchain and Streamlit:

```bash
pip install embedchain streamlit
```
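
If you just want to see the Embedchain calls the app is built around (`App.from_config`, `app.add`, and `app.chat`), here is a minimal standalone sketch. The token value and the source URL are placeholder examples; the config file is the one shown in the `config.yaml` tab below.

```python
import os

from embedchain import App

# Placeholder token; get yours from https://huggingface.co/settings/tokens
os.environ["HUGGINGFACE_ACCESS_TOKEN"] = "hf_..."

# Load the LLM and embedder settings from the YAML config shown below
app = App.from_config(config_path="config.yaml")

# Index an example source, then ask a question about it
app.add("https://www.forbes.com/profile/elon-musk")

# With `stream: true` in the config, chat() yields response chunks
for chunk in app.chat("What is the net worth of Elon Musk?"):
    print(chunk, end="")
```

The full Streamlit app and its configuration follow.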
<Tabs>
<Tab title="app.py">
```python
import os

from embedchain import App
import streamlit as st

# Collect the Hugging Face token in the sidebar
with st.sidebar:
    huggingface_access_token = st.text_input("Hugging Face Token", key="chatbot_api_key", type="password")
    "[Get Hugging Face Access Token](https://huggingface.co/settings/tokens)"
    "[View the source code](https://github.com/embedchain/examples/mistral-streamlit)"

st.title("💬 Chatbot")
st.caption("🚀 An Embedchain app powered by Mistral!")

if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
        Hi! I'm a chatbot. I can answer questions and learn new things!\n
        Ask me anything and if you want me to learn something do `/add <source>`.\n
        I can learn mostly everything. :)
        """,
        }
    ]

# Replay the chat history on every rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Ask me anything!"):
    if not st.session_state.chatbot_api_key:
        st.error("Please enter your Hugging Face Access Token")
        st.stop()

    os.environ["HUGGINGFACE_ACCESS_TOKEN"] = st.session_state.chatbot_api_key
    app = App.from_config(config_path="config.yaml")

    # `/add <source>` indexes a new source into the knowledge base
    if prompt.startswith("/add"):
        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        prompt = prompt.replace("/add", "").strip()
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Adding to knowledge base...")
            app.add(prompt)
            message_placeholder.markdown(f"Added {prompt} to knowledge base!")
            st.session_state.messages.append({"role": "assistant", "content": f"Added {prompt} to knowledge base!"})
        st.stop()

    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Stream the answer and update the placeholder as chunks arrive
    with st.chat_message("assistant"):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""

        for response in app.chat(prompt):
            msg_placeholder.empty()
            full_response += response

        msg_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
```
</Tab>
<Tab title="config.yaml">
```yaml
app:
  config:
    name: 'mistral-streamlit-app'

llm:
  provider: huggingface
  config:
    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1'
    temperature: 0.1
    max_tokens: 250
    top_p: 0.1
    stream: true

embedder:
  provider: huggingface
  config:
    model: 'sentence-transformers/all-mpnet-base-v2'
```
</Tab>
</Tabs>
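
The `stream: true` setting in the LLM config is what makes `app.chat()` yield response chunks, which the app consumes in a loop to render the answer incrementally.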
## Run the app locally

```bash
streamlit run app.py
```
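
Streamlit serves the app on `http://localhost:8501` by default. Paste your Hugging Face access token in the sidebar, teach the bot a new source with `/add <url>`, and then ask questions about it.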