csv.py

import csv
from io import StringIO
from urllib.parse import urlparse

import requests

from embedchain.loaders.base_loader import BaseLoader


class CsvLoader(BaseLoader):
    @staticmethod
    def _detect_delimiter(first_line):
        delimiters = [",", "\t", ";", "|"]
        counts = {delimiter: first_line.count(delimiter) for delimiter in delimiters}
        return max(counts, key=counts.get)

    @staticmethod
    def _get_file_content(content):
        url = urlparse(content)
        if all([url.scheme, url.netloc]) and url.scheme not in ["file", "http", "https"]:
            raise ValueError("Not a valid URL.")

        if url.scheme in ["http", "https"]:
            response = requests.get(content)
            response.raise_for_status()
            return StringIO(response.text)
        elif url.scheme == "file":
            path = url.path
            return open(path, newline="")  # Open the file using the path from the URI
        else:
            return open(content, newline="")  # Treat content as a regular file path

    @staticmethod
    def load_data(content):
        """Load a csv file with headers. Each line is a document"""
        result = []
        with CsvLoader._get_file_content(content) as file:
            first_line = file.readline()
            delimiter = CsvLoader._detect_delimiter(first_line)
            file.seek(0)  # Reset the file pointer to the start
            reader = csv.DictReader(file, delimiter=delimiter)
            for i, row in enumerate(reader):
                line = ", ".join([f"{field}: {value}" for field, value in row.items()])
                result.append({"content": line, "meta_data": {"url": content, "row": i + 1}})
        return result
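
A minimal usage sketch, not part of csv.py itself: it writes a tiny CSV with a header row to a temporary file and passes the path to CsvLoader.load_data. The import path embedchain.loaders.csv is inferred from this file's name and location, and the sample data is purely illustrative.

import csv
import tempfile

from embedchain.loaders.csv import CsvLoader  # import path inferred from this file's location

# Write a small CSV with a header row so there is something to load.
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["name", "age"])
    writer.writerow(["Alice", "30"])
    writer.writerow(["Bob", "25"])
    csv_path = f.name

# Each row becomes one document: a "field: value" string plus metadata
# recording the source path and the 1-based row number.
docs = CsvLoader.load_data(csv_path)
for doc in docs:
    print(doc["content"])    # e.g. "name: Alice, age: 30"
    print(doc["meta_data"])  # e.g. {"url": "<csv_path>", "row": 1}

Because _detect_delimiter only inspects the first line, the same call works unchanged for tab-, semicolon-, or pipe-separated files, and an http(s) or file:// URL can be passed in place of the local path.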