# web_page.py
  1. import hashlib
  2. import logging
  3. import requests
  4. try:
  5. from bs4 import BeautifulSoup
  6. except ImportError:
  7. raise ImportError(
  8. 'Webpage requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
  9. ) from None
  10. from embedchain.helpers.json_serializable import register_deserializable
  11. from embedchain.loaders.base_loader import BaseLoader
  12. from embedchain.utils.misc import clean_string
  13. @register_deserializable
  14. class WebPageLoader(BaseLoader):
  15. # Shared session for all instances
  16. _session = requests.Session()
  17. def load_data(self, url):
  18. """Load data from a web page using a shared requests' session."""
  19. headers = {
  20. "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36", # noqa:E501
  21. }
  22. response = self._session.get(url, headers=headers, timeout=30)
  23. response.raise_for_status()
  24. data = response.content
  25. content = self._get_clean_content(data, url)
  26. meta_data = {"url": url}
  27. doc_id = hashlib.sha256((content + url).encode()).hexdigest()
  28. return {
  29. "doc_id": doc_id,
  30. "data": [
  31. {
  32. "content": content,
  33. "meta_data": meta_data,
  34. }
  35. ],
  36. }
  37. @staticmethod
  38. def _get_clean_content(html, url) -> str:
  39. soup = BeautifulSoup(html, "html.parser")
  40. original_size = len(str(soup.get_text()))
  41. tags_to_exclude = [
  42. "nav",
  43. "aside",
  44. "form",
  45. "header",
  46. "noscript",
  47. "svg",
  48. "canvas",
  49. "footer",
  50. "script",
  51. "style",
  52. ]
  53. for tag in soup(tags_to_exclude):
  54. tag.decompose()
  55. ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
  56. for id_ in ids_to_exclude:
  57. tags = soup.find_all(id=id_)
  58. for tag in tags:
  59. tag.decompose()
  60. classes_to_exclude = [
  61. "elementor-location-header",
  62. "navbar-header",
  63. "nav",
  64. "header-sidebar-wrapper",
  65. "blog-sidebar-wrapper",
  66. "related-posts",
  67. ]
  68. for class_name in classes_to_exclude:
  69. tags = soup.find_all(class_=class_name)
  70. for tag in tags:
  71. tag.decompose()
  72. content = soup.get_text()
  73. content = clean_string(content)
  74. cleaned_size = len(content)
  75. if original_size != 0:
  76. logging.info(
  77. f"[{url}] Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)" # noqa:E501
  78. )
  79. return content
  80. @classmethod
  81. def close_session(cls):
  82. cls._session.close()