sitemap.py

import concurrent.futures
import hashlib
import logging

import requests

try:
    from bs4 import BeautifulSoup
    from bs4.builder import ParserRejectedMarkup
except ImportError:
    raise ImportError(
        'Sitemap requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
    ) from None

from embedchain.helper.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.web_page import WebPageLoader
from embedchain.utils import is_readable


@register_deserializable
class SitemapLoader(BaseLoader):
    def load_data(self, sitemap_url):
        """Fetch a sitemap, then load every page it lists with WebPageLoader."""
        output = []
        web_page_loader = WebPageLoader()

        response = requests.get(sitemap_url)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "xml")
        # Prefer <loc> entries nested under <url>; if none are found, fall
        # back to every <loc> in the document.
        links = [link.text for link in soup.find_all("loc") if link.parent.name == "url"]
        if len(links) == 0:
            links = [link.text for link in soup.find_all("loc")]

        # Stable document id derived from the link set and the sitemap URL.
        doc_id = hashlib.sha256((" ".join(links) + sitemap_url).encode()).hexdigest()

        def load_link(link):
            try:
                each_load_data = web_page_loader.load_data(link)
                if is_readable(each_load_data.get("data")[0].get("content")):
                    return each_load_data.get("data")
                logging.warning(f"Page is not readable (too many invalid characters): {link}")
            except ParserRejectedMarkup as e:
                logging.error(f"Failed to parse {link}: {e}")
            return None

        # Load pages concurrently; collect whichever ones finish successfully.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_link = {executor.submit(load_link, link): link for link in links}
            for future in concurrent.futures.as_completed(future_to_link):
                link = future_to_link[future]
                try:
                    data = future.result()
                    if data:
                        output.append(data)
                except Exception as e:
                    logging.error(f"Error loading page {link}: {e}")

        # Each successful page load contributes its first document entry.
        return {"doc_id": doc_id, "data": [data[0] for data in output if data]}
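

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the loader): driving SitemapLoader
# directly. The sitemap URL is a hypothetical placeholder; in embedchain the
# loader is normally invoked for you when a sitemap data source is added.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    loader = SitemapLoader()
    result = loader.load_data("https://example.com/sitemap.xml")  # hypothetical URL
    print(f"doc_id: {result['doc_id']}")
    print(f"loaded {len(result['data'])} page(s)")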