sitemap.py

import hashlib
import logging

import requests
from bs4 import BeautifulSoup
from bs4.builder import ParserRejectedMarkup

from embedchain.helper.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.web_page import WebPageLoader
from embedchain.utils import is_readable


@register_deserializable
class SitemapLoader(BaseLoader):
    def load_data(self, sitemap_url):
        """
        This method takes a sitemap URL as input and retrieves
        all the URLs to use the WebPageLoader to load content
        of each page.
        """
        output = []
        web_page_loader = WebPageLoader()

        response = requests.get(sitemap_url)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "xml")
        links = [link.text for link in soup.find_all("loc") if link.parent.name == "url"]
        if len(links) == 0:
            # Get all <loc> tags as a fallback. This might include images.
            links = [link.text for link in soup.find_all("loc")]

        doc_id = hashlib.sha256((" ".join(links) + sitemap_url).encode()).hexdigest()

        for link in links:
            try:
                each_load_data = web_page_loader.load_data(link)
                if is_readable(each_load_data[0].get("content")):
                    output.append(each_load_data)
                else:
                    logging.warning(f"Page is not readable (too many invalid characters): {link}")
            except ParserRejectedMarkup as e:
                logging.error(f"Failed to parse {link}: {e}")

        return {"doc_id": doc_id, "data": [data[0] for data in output]}
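A minimal usage sketch, not part of the file above: it assumes embedchain is installed, that SitemapLoader is importable, and that the placeholder sitemap URL is reachable; the URL and the printed fields are illustrative assumptions, not taken from the source.

# Hypothetical driver script for SitemapLoader.
# "https://example.com/sitemap.xml" is a placeholder URL, not from the original file.
if __name__ == "__main__":
    loader = SitemapLoader()
    result = loader.load_data("https://example.com/sitemap.xml")

    # load_data returns a dict with a stable "doc_id" (hash of the links plus
    # the sitemap URL) and "data", a list of per-page results from WebPageLoader.
    print("doc_id:", result["doc_id"])
    print("readable pages loaded:", len(result["data"]))

    for page in result["data"]:
        # Assumes each page entry is a dict containing "content" (and possibly
        # other keys such as "meta_data"), as produced by WebPageLoader.
        print(len(page.get("content", "")), "characters of content")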