web_page.py

import hashlib
import logging

import requests

try:
    from bs4 import BeautifulSoup
except ImportError:
    raise ImportError(
        'Webpage requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
    ) from None

from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string

logger = logging.getLogger(__name__)
@register_deserializable
class WebPageLoader(BaseLoader):
    # Shared session for all instances
    _session = requests.Session()

    def load_data(self, url):
        """Load data from a web page using a shared requests session."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",  # noqa:E501
        }
        response = self._session.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        data = response.content
        content = self._get_clean_content(data, url)

        metadata = {"url": url}

        # Deterministic ID: the same content at the same URL always yields the same doc_id.
        doc_id = hashlib.sha256((content + url).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": [
                {
                    "content": content,
                    "meta_data": metadata,
                }
            ],
        }
    @staticmethod
    def _get_clean_content(html, url) -> str:
        soup = BeautifulSoup(html, "html.parser")
        original_size = len(str(soup.get_text()))

        # Drop structural tags that rarely carry article content.
        tags_to_exclude = [
            "nav",
            "aside",
            "form",
            "header",
            "noscript",
            "svg",
            "canvas",
            "footer",
            "script",
            "style",
        ]
        for tag in soup(tags_to_exclude):
            tag.decompose()

        # Drop common navigation chrome, identified by element id.
        ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
        for id_ in ids_to_exclude:
            tags = soup.find_all(id=id_)
            for tag in tags:
                tag.decompose()

        # Drop theme-specific wrappers (headers, sidebars, related-post widgets), identified by class.
        classes_to_exclude = [
            "elementor-location-header",
            "navbar-header",
            "nav",
            "header-sidebar-wrapper",
            "blog-sidebar-wrapper",
            "related-posts",
        ]
        for class_name in classes_to_exclude:
            tags = soup.find_all(class_=class_name)
            for tag in tags:
                tag.decompose()

        content = soup.get_text()
        content = clean_string(content)

        cleaned_size = len(content)
        if original_size != 0:
            logger.info(
                f"[{url}] Cleaned page size: {cleaned_size} characters, down from {original_size} "
                f"(shrunk: {original_size - cleaned_size} chars, {round((1 - (cleaned_size / original_size)) * 100, 2)}%)"
            )
        return content
    @classmethod
    def close_session(cls):
        """Close the shared session once the loader is no longer needed."""
        cls._session.close()
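

# Minimal usage sketch (assumption: not part of the original module; the URL is a
# placeholder and any reachable page works). Shows the shape of the returned
# document and that the shared session should be closed when done.
if __name__ == "__main__":
    loader = WebPageLoader()
    try:
        result = loader.load_data("https://example.com")
        print(result["doc_id"])
        for entry in result["data"]:
            print(entry["meta_data"]["url"], len(entry["content"]), "chars")
    finally:
        # Release the session shared by all WebPageLoader instances.
        WebPageLoader.close_session()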