web_page.py

import hashlib
import logging

import requests

try:
    from bs4 import BeautifulSoup
except ImportError:
    raise ImportError(
        'Webpage requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
    ) from None

from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string


@register_deserializable
class WebPageLoader(BaseLoader):
    # Shared session for all instances
    _session = requests.Session()

    def load_data(self, url):
        """Load data from a web page using a shared requests session."""
        response = self._session.get(url, timeout=30)
        response.raise_for_status()
        data = response.content
        content = self._get_clean_content(data, url)

        meta_data = {"url": url}

        # Hash the cleaned content together with the URL so the same page
        # always maps to the same document id.
        doc_id = hashlib.sha256((content + url).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": [
                {
                    "content": content,
                    "meta_data": meta_data,
                }
            ],
        }

    @staticmethod
    def _get_clean_content(html, url) -> str:
        """Strip boilerplate markup from the page and return its cleaned text."""
        soup = BeautifulSoup(html, "html.parser")
        original_size = len(str(soup.get_text()))

        tags_to_exclude = [
            "nav",
            "aside",
            "form",
            "header",
            "noscript",
            "svg",
            "canvas",
            "footer",
            "script",
            "style",
        ]
        for tag in soup(tags_to_exclude):
            tag.decompose()

        ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
        for id_ in ids_to_exclude:
            tags = soup.find_all(id=id_)
            for tag in tags:
                tag.decompose()

        classes_to_exclude = [
            "elementor-location-header",
            "navbar-header",
            "nav",
            "header-sidebar-wrapper",
            "blog-sidebar-wrapper",
            "related-posts",
        ]
        for class_name in classes_to_exclude:
            tags = soup.find_all(class_=class_name)
            for tag in tags:
                tag.decompose()

        content = soup.get_text()
        content = clean_string(content)

        cleaned_size = len(content)
        if original_size != 0:
            logging.info(
                f"[{url}] Cleaned page size: {cleaned_size} characters, down from {original_size} "
                f"(shrunk: {original_size - cleaned_size} chars, "
                f"{round((1 - (cleaned_size / original_size)) * 100, 2)}%)"
            )

        return content

    @classmethod
    def close_session(cls):
        """Close the requests session shared by all WebPageLoader instances."""
        cls._session.close()
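

# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how this loader might be exercised directly.
# It assumes the embedchain package (with its `dataloaders` extra) is installed
# and that the target URL is reachable; the URL below is purely illustrative.
if __name__ == "__main__":
    loader = WebPageLoader()
    try:
        result = loader.load_data("https://example.com")
        print(result["doc_id"])
        print(result["data"][0]["content"][:200])
    finally:
        # Release the shared session once all loads are done.
        WebPageLoader.close_session()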