# web_page.py
import logging

import requests
from bs4 import BeautifulSoup

from embedchain.utils import clean_string
  5. class WebPageLoader:
  6. def load_data(self, url):
  7. """Load data from a web page."""
  8. response = requests.get(url)
  9. data = response.content
  10. soup = BeautifulSoup(data, "html.parser")
  11. original_size = len(str(soup.get_text()))
  12. tags_to_exclude = [
  13. "nav",
  14. "aside",
  15. "form",
  16. "header",
  17. "noscript",
  18. "svg",
  19. "canvas",
  20. "footer",
  21. "script",
  22. "style",
  23. ]
  24. for tag in soup(tags_to_exclude):
  25. tag.decompose()
  26. ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
  27. for id in ids_to_exclude:
  28. tags = soup.find_all(id=id)
  29. for tag in tags:
  30. tag.decompose()
  31. classes_to_exclude = [
  32. "elementor-location-header",
  33. "navbar-header",
  34. "nav",
  35. "header-sidebar-wrapper",
  36. "blog-sidebar-wrapper",
  37. "related-posts",
  38. ]
  39. for class_name in classes_to_exclude:
  40. tags = soup.find_all(class_=class_name)
  41. for tag in tags:
  42. tag.decompose()
  43. content = soup.get_text()
  44. content = clean_string(content)
  45. cleaned_size = len(content)
  46. if original_size != 0:
  47. logging.info(
  48. f"[{url}] Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)" # noqa:E501
  49. )
  50. meta_data = {
  51. "url": url,
  52. }
  53. return [
  54. {
  55. "content": content,
  56. "meta_data": meta_data,
  57. }
  58. ]