app.py 3.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101
  1. import csv
  2. import queue
  3. import threading
  4. from io import StringIO
  5. import requests
  6. import streamlit as st
  7. from embedchain import App
  8. from embedchain.config import BaseLlmConfig
  9. from embedchain.helpers.callbacks import (StreamingStdOutCallbackHandlerYield,
  10. generate)
  11. @st.cache_resource
  12. def sadhguru_ai():
  13. app = App()
  14. return app
  15. # Function to read the CSV file row by row
  16. def read_csv_row_by_row(file_path):
  17. with open(file_path, mode="r", newline="", encoding="utf-8") as file:
  18. csv_reader = csv.DictReader(file)
  19. for row in csv_reader:
  20. yield row
  21. @st.cache_resource
  22. def add_data_to_app():
  23. app = sadhguru_ai()
  24. url = "https://gist.githubusercontent.com/deshraj/50b0597157e04829bbbb7bc418be6ccb/raw/95b0f1547028c39691f5c7db04d362baa597f3f4/data.csv" # noqa:E501
  25. response = requests.get(url)
  26. csv_file = StringIO(response.text)
  27. for row in csv.reader(csv_file):
  28. if row and row[0] != "url":
  29. app.add(row[0], data_type="web_page")
# --- Page bootstrap: build the app, ingest the corpus, render the header ---
app = sadhguru_ai()
add_data_to_app()

# Avatar shown next to every assistant message.
assistant_avatar_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Sadhguru-Jaggi-Vasudev.jpg/640px-Sadhguru-Jaggi-Vasudev.jpg"  # noqa: E501

st.title("🙏 Sadhguru AI")
styled_caption = '<p style="font-size: 17px; color: #aaa;">🚀 An <a href="https://github.com/embedchain/embedchain">Embedchain</a> app powered with Sadhguru\'s wisdom!</p>'  # noqa: E501
st.markdown(styled_caption, unsafe_allow_html=True)  # noqa: E501

# Seed the persistent chat history with a one-time assistant greeting.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
Hi, I'm Sadhguru AI! I'm a mystic, yogi, visionary, and spiritual master. I'm here to answer your questions about life, the universe, and everything.
""",  # noqa: E501
        }
    ]

# Replay the stored conversation on every Streamlit rerun; only assistant
# messages get the avatar.
for message in st.session_state.messages:
    role = message["role"]
    with st.chat_message(role, avatar=assistant_avatar_url if role == "assistant" else None):
        st.markdown(message["content"])
  49. if prompt := st.chat_input("Ask me anything!"):
  50. with st.chat_message("user"):
  51. st.markdown(prompt)
  52. st.session_state.messages.append({"role": "user", "content": prompt})
  53. with st.chat_message("assistant", avatar=assistant_avatar_url):
  54. msg_placeholder = st.empty()
  55. msg_placeholder.markdown("Thinking...")
  56. full_response = ""
  57. q = queue.Queue()
  58. def app_response(result):
  59. config = BaseLlmConfig(stream=True, callbacks=[StreamingStdOutCallbackHandlerYield(q)])
  60. answer, citations = app.chat(prompt, config=config, citations=True)
  61. result["answer"] = answer
  62. result["citations"] = citations
  63. results = {}
  64. thread = threading.Thread(target=app_response, args=(results,))
  65. thread.start()
  66. for answer_chunk in generate(q):
  67. full_response += answer_chunk
  68. msg_placeholder.markdown(full_response)
  69. thread.join()
  70. answer, citations = results["answer"], results["citations"]
  71. if citations:
  72. full_response += "\n\n**Sources**:\n"
  73. sources = list(set(map(lambda x: x[1]["url"], citations)))
  74. for i, source in enumerate(sources):
  75. full_response += f"{i+1}. {source}\n"
  76. msg_placeholder.markdown(full_response)
  77. st.session_state.messages.append({"role": "assistant", "content": full_response})