sitemap.py

import concurrent.futures
import hashlib
import logging

import requests
from tqdm import tqdm

try:
    from bs4 import BeautifulSoup
    from bs4.builder import ParserRejectedMarkup
except ImportError:
    raise ImportError(
        'Sitemap requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
    ) from None

from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.web_page import WebPageLoader


@register_deserializable
class SitemapLoader(BaseLoader):
    """
    This loader takes a sitemap URL as input, retrieves all the URLs
    listed in it, and uses the WebPageLoader to load the content
    of each page.
    """

    def load_data(self, sitemap_url):
        output = []
        web_page_loader = WebPageLoader()

        response = requests.get(sitemap_url)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "xml")
        # Standard sitemaps nest each <loc> inside a <url> element; sitemap
        # index files nest <loc> inside <sitemap> instead, so the parent check
        # keeps only direct page URLs.
        links = [link.text for link in soup.find_all("loc") if link.parent.name == "url"]
        if len(links) == 0:
            # Fall back to every <loc> for sitemaps that don't follow the
            # <url><loc> convention.
            links = [link.text for link in soup.find_all("loc")]

        doc_id = hashlib.sha256((" ".join(links) + sitemap_url).encode()).hexdigest()

        def load_web_page(link):
            try:
                loader_data = web_page_loader.load_data(link)
                return loader_data.get("data")
            except ParserRejectedMarkup as e:
                logging.error(f"Failed to parse {link}: {e}")
                return None

        # Fetch the pages concurrently, with a progress bar over the futures
        # as they complete.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_link = {executor.submit(load_web_page, link): link for link in links}
            for future in tqdm(concurrent.futures.as_completed(future_to_link), total=len(links), desc="Loading pages"):
                link = future_to_link[future]
                try:
                    data = future.result()
                    if data:
                        output.extend(data)
                except Exception as e:
                    logging.error(f"Error loading page {link}: {e}")

        return {"doc_id": doc_id, "data": output}
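A minimal usage sketch for context (the sitemap URL is a placeholder, and the import path is assumed from the package layout implied by the file's own imports; the result shape matches the dict returned by load_data above):

    from embedchain.loaders.sitemap import SitemapLoader

    loader = SitemapLoader()
    result = loader.load_data("https://example.com/sitemap.xml")  # placeholder URL

    print(result["doc_id"])     # sha256 over the discovered links plus the sitemap URL
    print(len(result["data"]))  # one entry per successfully loaded page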