
---
title: '🚀 Streamlit'
description: 'Integrate with Streamlit to plug and play with any LLM'
---

In this example, we will learn how to use `mistralai/Mistral-7B-v0.1` and Embedchain together with Streamlit to build a simple RAG chatbot.
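
The Streamlit UI is mostly presentation; underneath it the chatbot comes down to three Embedchain calls: `App.from_config` to load the Mistral configuration, `app.add` to ingest a source, and `app.chat` to answer from it. As a quick orientation, here is a minimal, non-Streamlit sketch of that flow. It is illustrative only: the token value and the example source are hypothetical placeholders, and it assumes the `config.yaml` from the Setup section is in the working directory.

```python
import os

from embedchain import Pipeline as App

# Hypothetical placeholder token; the Streamlit app below collects this from the sidebar instead.
os.environ["HUGGINGFACE_ACCESS_TOKEN"] = "hf_xxx"

# Load the same config.yaml shown in the Setup section (Mistral-7B LLM + Hugging Face embedder).
app = App.from_config(config_path="config.yaml")

# Ingest a source into the knowledge base, then ask a question about it.
app.add("https://en.wikipedia.org/wiki/Mistral_AI")  # hypothetical example source
for chunk in app.chat("What does Mistral AI build?"):  # streams because config.yaml sets stream: true
    print(chunk, end="", flush=True)
```

The Streamlit app below wraps exactly this flow in a chat interface, using `/add <source>` messages to trigger `app.add`.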
## Setup

<Accordion title="Customize using code.">

1. Install Embedchain and Streamlit

```bash
pip install embedchain
pip install streamlit
```

<Tabs>
<Tab title="app.py">
```python
import os

from embedchain import Pipeline as App
import streamlit as st

with st.sidebar:
    huggingface_access_token = st.text_input("Hugging Face Token", key="chatbot_api_key", type="password")
    "[Get Hugging Face Access Token](https://huggingface.co/settings/tokens)"
    "[View the source code](https://github.com/embedchain/examples/mistral-streamlit)"

st.title("💬 Chatbot")
st.caption("🚀 An Embedchain app powered by Mistral!")

# Seed the chat history with a welcome message on first load.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
        Hi! I'm a chatbot. I can answer questions and learn new things!\n
        Ask me anything and if you want me to learn something do `/add <source>`.\n
        I can learn mostly everything. :)
        """,
        }
    ]

# Replay the conversation so far.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Ask me anything!"):
    if not st.session_state.chatbot_api_key:
        st.error("Please enter your Hugging Face Access Token")
        st.stop()

    os.environ["HUGGINGFACE_ACCESS_TOKEN"] = st.session_state.chatbot_api_key
    app = App.from_config(config_path="config.yaml")

    # `/add <source>` ingests a new source into the knowledge base instead of chatting.
    if prompt.startswith("/add"):
        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        prompt = prompt.replace("/add", "").strip()
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Adding to knowledge base...")
            app.add(prompt)
            message_placeholder.markdown(f"Added {prompt} to knowledge base!")
            st.session_state.messages.append({"role": "assistant", "content": f"Added {prompt} to knowledge base!"})
            st.stop()

    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Stream the answer chunk by chunk (config.yaml sets stream: true).
    with st.chat_message("assistant"):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""

        for response in app.chat(prompt):
            msg_placeholder.empty()
            full_response += response

        msg_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
```
</Tab>
<Tab title="config.yaml">

```yaml
app:
  config:
    name: 'mistral-streamlit-app'

llm:
  provider: huggingface
  config:
    model: 'mistralai/Mistral-7B-v0.1'
    temperature: 0.1
    max_tokens: 250
    top_p: 0.1
    stream: true

embedder:
  provider: huggingface
  config:
    model: 'sentence-transformers/all-mpnet-base-v2'
```
</Tab>
</Tabs>
</Accordion>
## Run it locally
```bash
streamlit run app.py
```