app.py

import csv
import queue
import threading

import streamlit as st
from embedchain import Pipeline as App
from embedchain.config import BaseLlmConfig
from embedchain.helpers.callbacks import (StreamingStdOutCallbackHandlerYield,
                                          generate)
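

# Cache the Embedchain app so Streamlit creates it only once per session.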
@st.cache_resource
def sadhguru_ai():
    app = App()
    return app


# Function to read the CSV file row by row
def read_csv_row_by_row(file_path):
    with open(file_path, mode="r", newline="", encoding="utf-8") as file:
        csv_reader = csv.DictReader(file)
        for row in csv_reader:
            yield row
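

# Ingest every URL listed in data.csv; caching ensures this runs only once.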
@st.cache_resource
def add_data_to_app():
    app = sadhguru_ai()
    file_path = "data.csv"
    for row in read_csv_row_by_row(file_path):
        app.add(row["url"], data_type="web_page")
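

# Build the app and index the data at startup.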
app = sadhguru_ai()
add_data_to_app()

assistant_avatar_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Sadhguru-Jaggi-Vasudev.jpg/640px-Sadhguru-Jaggi-Vasudev.jpg"  # noqa: E501

st.title("🙏 Sadhguru AI")
styled_caption = '<p style="font-size: 17px; color: #aaa;">🚀 An <a href="https://github.com/embedchain/embedchain">Embedchain</a> app powered with Sadhguru\'s wisdom!</p>'  # noqa: E501
st.markdown(styled_caption, unsafe_allow_html=True)
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
        Hi, I'm Sadhguru AI! I'm a mystic, yogi, visionary, and spiritual master. I'm here to answer your questions about life, the universe, and everything.
        """,  # noqa: E501
        }
    ]
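
# Replay the conversation so far on every Streamlit rerun.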
for message in st.session_state.messages:
    role = message["role"]
    with st.chat_message(role, avatar=assistant_avatar_url if role == "assistant" else None):
        st.markdown(message["content"])
if prompt := st.chat_input("Ask me anything!"):
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant", avatar=assistant_avatar_url):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""

        q = queue.Queue()

        def app_response(result):
            # Run the chat in a worker thread; the streaming callback pushes
            # answer chunks onto the queue as they arrive.
            config = BaseLlmConfig(stream=True, callbacks=[StreamingStdOutCallbackHandlerYield(q)])
            answer, citations = app.chat(prompt, config=config, citations=True)
            result["answer"] = answer
            result["citations"] = citations

        results = {}
        thread = threading.Thread(target=app_response, args=(results,))
        thread.start()

        # Drain the queue on the main thread, rendering the partial answer.
        for answer_chunk in generate(q):
            full_response += answer_chunk
            msg_placeholder.markdown(full_response)

        thread.join()
        answer, citations = results["answer"], results["citations"]
        if citations:
            full_response += "\n\n**Sources**:\n"
            for i, citation in enumerate(citations):
                # The second element of each citation tuple is the source.
                full_response += f"{i + 1}. {citation[1]}\n"

        msg_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})