# gmail.py

import hashlib
import logging
import os
import quopri
from textwrap import dedent

from bs4 import BeautifulSoup

try:
    from llama_index import download_loader
except ImportError:
    raise ImportError("Gmail requires extra dependencies. Install with `pip install embedchain[community]`") from None

from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils import clean_string


def get_header(text: str, header: str) -> str:
    """Return the value of the first `header` field found in `text`."""
    start_string_position = text.find(header)
    pos_start = text.find(":", start_string_position) + 1
    pos_end = text.find("\n", pos_start)
    header = text[pos_start:pos_end]
    return header.strip()
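
# Example (illustrative): given raw message text containing plain "Name: value"
# header lines, get_header pulls the value after the first matching header name:
#   get_header("From: a@example.com\nSubject: Hello\n", "Subject")  # -> "Hello"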


class GmailLoader(BaseLoader):
    def load_data(self, query):
        """Load data from Gmail."""
        if not os.path.isfile("credentials.json"):
            raise FileNotFoundError(
                "You must download a valid credentials file from your Google "
                "developer account. Refer to `https://cloud.google.com/docs/authentication/api-keys`"
            )
        GmailReader = download_loader("GmailReader")
        loader = GmailReader(query=query, service=None, results_per_page=20)
        documents = loader.load_data()
        logging.info(f"Gmail Loader: {len(documents)} mails found for query: {query}")

        data = []
        data_contents = []
        for document in documents:
            original_size = len(document.text)
            snippet = document.metadata.get("snippet")
            meta_data = {
                "url": document.metadata.get("id"),
                "date": get_header(document.text, "Date"),
                "subject": get_header(document.text, "Subject"),
                "from": get_header(document.text, "From"),
                "to": get_header(document.text, "To"),
                "search_query": query,
            }
            # Decode the quoted-printable transfer encoding of the raw message
            decoded_bytes = quopri.decodestring(document.text)
            decoded_str = decoded_bytes.decode("utf-8", errors="replace")
            # Slice off the headers; keep everything from the HTML document onwards
            mail_start = decoded_str.find("<!DOCTYPE")
            email_data = decoded_str[mail_start:]

            # Web page HTML processing
            soup = BeautifulSoup(email_data, "html.parser")
            tags_to_exclude = [
                "nav",
                "aside",
                "form",
                "header",
                "noscript",
                "svg",
                "canvas",
                "footer",
                "script",
                "style",
            ]
            for tag in soup(tags_to_exclude):
                tag.decompose()

            ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
            for id_ in ids_to_exclude:
                tags = soup.find_all(id=id_)
                for tag in tags:
                    tag.decompose()

            classes_to_exclude = [
                "elementor-location-header",
                "navbar-header",
                "nav",
                "header-sidebar-wrapper",
                "blog-sidebar-wrapper",
                "related-posts",
            ]
            for class_name in classes_to_exclude:
                tags = soup.find_all(class_=class_name)
                for tag in tags:
                    tag.decompose()
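
            # With navigation, sidebar, and related-post containers removed, only
            # the main body text of the mail remains for extraction below.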
            content = soup.get_text()
            content = clean_string(content)

            cleaned_size = len(content)
            if original_size != 0:
                logging.info(
                    f"[{meta_data.get('url')}] Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size - cleaned_size} chars, {round((1 - cleaned_size / original_size) * 100, 2)}%)"  # noqa: E501
                )
            result = f"""
            email from '{meta_data.get('from')}' to '{meta_data.get('to')}'
            subject: {meta_data.get('subject')}
            date: {meta_data.get('date')}
            preview: {snippet}
            content: {content}
            """
            data_content = dedent(result)
            data.append({"content": data_content, "meta_data": meta_data})
            data_contents.append(data_content)

        doc_id = hashlib.sha256((query + ", ".join(data_contents)).encode()).hexdigest()
        response_data = {"doc_id": doc_id, "data": data}
        return response_data
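

# Minimal usage sketch (assumes a valid `credentials.json` in the working
# directory and that the llama_index "GmailReader" loader plus its Google API
# dependencies are installed; the query string below is only an example):
if __name__ == "__main__":
    loader = GmailLoader()
    result = loader.load_data("from:noreply@github.com after:2023/01/01")
    print(f"doc_id: {result['doc_id']}")
    for item in result["data"]:
        print(item["meta_data"]["subject"], "-", item["meta_data"]["from"])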