# web_page.py
  1. import logging
  2. import requests
  3. from bs4 import BeautifulSoup
  4. from embedchain.helper_classes.json_serializable import register_deserializable
  5. from embedchain.loaders.base_loader import BaseLoader
  6. from embedchain.utils import clean_string
  7. @register_deserializable
  8. class WebPageLoader(BaseLoader):
  9. def load_data(self, url):
  10. """Load data from a web page."""
  11. response = requests.get(url)
  12. data = response.content
  13. soup = BeautifulSoup(data, "html.parser")
  14. original_size = len(str(soup.get_text()))
  15. tags_to_exclude = [
  16. "nav",
  17. "aside",
  18. "form",
  19. "header",
  20. "noscript",
  21. "svg",
  22. "canvas",
  23. "footer",
  24. "script",
  25. "style",
  26. ]
  27. for tag in soup(tags_to_exclude):
  28. tag.decompose()
  29. ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
  30. for id in ids_to_exclude:
  31. tags = soup.find_all(id=id)
  32. for tag in tags:
  33. tag.decompose()
  34. classes_to_exclude = [
  35. "elementor-location-header",
  36. "navbar-header",
  37. "nav",
  38. "header-sidebar-wrapper",
  39. "blog-sidebar-wrapper",
  40. "related-posts",
  41. ]
  42. for class_name in classes_to_exclude:
  43. tags = soup.find_all(class_=class_name)
  44. for tag in tags:
  45. tag.decompose()
  46. content = soup.get_text()
  47. content = clean_string(content)
  48. cleaned_size = len(content)
  49. if original_size != 0:
  50. logging.info(
  51. f"[{url}] Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)" # noqa:E501
  52. )
  53. meta_data = {
  54. "url": url,
  55. }
  56. return [
  57. {
  58. "content": content,
  59. "meta_data": meta_data,
  60. }
  61. ]