web_page.py
import hashlib
import logging

import requests

try:
    from bs4 import BeautifulSoup
except ImportError:
    raise ImportError(
        'Webpage requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
    ) from None

from embedchain.helper.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils import clean_string

@register_deserializable
class WebPageLoader(BaseLoader):
    def load_data(self, url):
        """Load data from a web page."""
        # Bound the request so a stalled server cannot hang ingestion.
        response = requests.get(url, timeout=30)
        data = response.content
        content = self._get_clean_content(data, url)

        meta_data = {
            "url": url,
        }

        # Hash the content together with the URL so the same text hosted at
        # two different URLs still gets distinct, deterministic document ids.
        doc_id = hashlib.sha256((content + url).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": [
                {
                    "content": content,
                    "meta_data": meta_data,
                }
            ],
        }
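
    # Shape of the value returned by load_data above (illustrative sketch;
    # the URL is a hypothetical example, not taken from the source):
    #
    #     {
    #         "doc_id": "<64-char sha256 hexdigest>",
    #         "data": [
    #             {
    #                 "content": "...cleaned page text...",
    #                 "meta_data": {"url": "https://example.com"},
    #             }
    #         ],
    #     }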

    def _get_clean_content(self, html, url) -> str:
        soup = BeautifulSoup(html, "html.parser")
        original_size = len(str(soup.get_text()))

        # Drop whole tags that carry layout or scripting rather than content.
        tags_to_exclude = [
            "nav",
            "aside",
            "form",
            "header",
            "noscript",
            "svg",
            "canvas",
            "footer",
            "script",
            "style",
        ]
        for tag in soup(tags_to_exclude):
            tag.decompose()

        # Drop elements whose ids mark common navigation chrome.
        ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
        for html_id in ids_to_exclude:
            tags = soup.find_all(id=html_id)
            for tag in tags:
                tag.decompose()

        # Drop elements whose CSS classes mark headers, sidebars, and
        # related-post widgets used by popular site themes.
        classes_to_exclude = [
            "elementor-location-header",
            "navbar-header",
            "nav",
            "header-sidebar-wrapper",
            "blog-sidebar-wrapper",
            "related-posts",
        ]
        for class_name in classes_to_exclude:
            tags = soup.find_all(class_=class_name)
            for tag in tags:
                tag.decompose()

        content = soup.get_text()
        content = clean_string(content)

        cleaned_size = len(content)
        if original_size != 0:
            logging.info(
                f"[{url}] Cleaned page size: {cleaned_size} characters, down from {original_size} "
                f"(shrunk: {original_size - cleaned_size} chars, {round((1 - cleaned_size / original_size) * 100, 2)}%)"
            )

        return content
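

# Minimal usage sketch (the URL below is an illustrative assumption, not part
# of the source): instantiate the loader, fetch a page, and read the cleaned
# text back out of the returned document dict.
if __name__ == "__main__":
    loader = WebPageLoader()
    doc = loader.load_data("https://example.com")
    print(doc["doc_id"])
    print(doc["data"][0]["content"][:200])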