app.py 3.3 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697
import queue
import threading

import streamlit as st
from embedchain import Pipeline as App
from embedchain.config import BaseLlmConfig
from embedchain.helpers.callbacks import StreamingStdOutCallbackHandlerYield, generate
  6. @st.cache_resource
  7. def unacademy_ai():
  8. app = App()
  9. return app
  10. app = unacademy_ai()
  11. assistant_avatar_url = "https://cdn-images-1.medium.com/v2/resize:fit:1200/1*LdFNhpOe7uIn-bHK9VUinA.jpeg"
  12. st.markdown(f"# <img src='{assistant_avatar_url}' width={35} /> Unacademy UPSC AI", unsafe_allow_html=True)
  13. styled_caption = """
  14. <p style="font-size: 17px; color: #aaa;">🚀 An <a href="https://github.com/embedchain/embedchain">Embedchain</a> app powered with Unacademy\'s UPSC data!</p>
  15. """
  16. st.markdown(styled_caption, unsafe_allow_html=True)
  17. with st.expander(":grey[Want to create your own Unacademy UPSC AI?]"):
  18. st.write(
  19. """
  20. ```bash
  21. pip install embedchain
  22. ```
  23. ```python
  24. from embedchain import Pipeline as App
  25. unacademy_ai_app = App()
  26. unacademy_ai_app.add("https://unacademy.com/content/upsc/study-material/plan-policy/atma-nirbhar-bharat-3-0/", data_type="web_page")
  27. unacademy_ai_app.chat("What is Atma Nirbhar 3.0?")
  28. ```
  29. For more information, checkout the [Embedchain docs](https://docs.embedchain.ai/get-started/quickstart).
  30. """
  31. )
  32. if "messages" not in st.session_state:
  33. st.session_state.messages = [
  34. {
  35. "role": "assistant",
  36. "content": """Hi, I'm Unacademy UPSC AI bot, who can answer any questions related to UPSC preparation. Let me help you prepare better for UPSC.\n
  37. Sample questions:
  38. - What are the subjects in UPSC CSE?
  39. - What is the CSE scholarship price amount?
  40. - What are different indian calendar forms?
  41. """,
  42. }
  43. ]
  44. for message in st.session_state.messages:
  45. role = message["role"]
  46. with st.chat_message(role, avatar=assistant_avatar_url if role == "assistant" else None):
  47. st.markdown(message["content"])
  48. if prompt := st.chat_input("Ask me anything!"):
  49. with st.chat_message("user"):
  50. st.markdown(prompt)
  51. st.session_state.messages.append({"role": "user", "content": prompt})
  52. with st.chat_message("assistant", avatar=assistant_avatar_url):
  53. msg_placeholder = st.empty()
  54. msg_placeholder.markdown("Thinking...")
  55. full_response = ""
  56. q = queue.Queue()
  57. def app_response(result):
  58. llm_config = app.llm.config.as_dict()
  59. llm_config["callbacks"] = [StreamingStdOutCallbackHandlerYield(q=q)]
  60. config = BaseLlmConfig(**llm_config)
  61. answer, citations = app.chat(prompt, config=config, citations=True)
  62. result["answer"] = answer
  63. result["citations"] = citations
  64. results = {}
  65. for answer_chunk in generate(q):
  66. full_response += answer_chunk
  67. msg_placeholder.markdown(full_response)
  68. answer, citations = results["answer"], results["citations"]
  69. if citations:
  70. full_response += "\n\n**Sources**:\n"
  71. sources = list(set(map(lambda x: x[1], citations)))
  72. for i, source in enumerate(sources):
  73. full_response += f"{i+1}. {source}\n"
  74. msg_placeholder.markdown(full_response)
  75. st.session_state.messages.append({"role": "assistant", "content": full_response})