# utils.py

import json
import logging
import os
import re
import string
from typing import Any

from bs4 import BeautifulSoup
from schema import Optional, Or, Schema

from embedchain.models.data_type import DataType


def parse_content(content, type):
    """
    Parse HTML content with the given BeautifulSoup parser, strip navigation and
    other boilerplate elements, and return the cleaned text.
    """
    implemented = ["html.parser", "lxml", "lxml-xml", "xml", "html5lib"]
    if type not in implemented:
        raise ValueError(f"Parser type {type} not implemented. Please choose one of {implemented}")

    soup = BeautifulSoup(content, type)
    original_size = len(str(soup.get_text()))

    tags_to_exclude = [
        "nav",
        "aside",
        "form",
        "header",
        "noscript",
        "svg",
        "canvas",
        "footer",
        "script",
        "style",
    ]
    for tag in soup(tags_to_exclude):
        tag.decompose()

    ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
    for id in ids_to_exclude:
        tags = soup.find_all(id=id)
        for tag in tags:
            tag.decompose()

    classes_to_exclude = [
        "elementor-location-header",
        "navbar-header",
        "nav",
        "header-sidebar-wrapper",
        "blog-sidebar-wrapper",
        "related-posts",
    ]
    for class_name in classes_to_exclude:
        tags = soup.find_all(class_=class_name)
        for tag in tags:
            tag.decompose()

    content = soup.get_text()
    content = clean_string(content)

    cleaned_size = len(content)
    if original_size != 0:
        logging.info(
            f"Cleaned page size: {cleaned_size} characters, down from {original_size} "
            f"(shrunk: {original_size - cleaned_size} chars, {round((1 - (cleaned_size / original_size)) * 100, 2)}%)"
        )

    return content
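
# Illustrative example (a sketch, not part of the original module; the input HTML is assumed):
#   parse_content("<html><nav>Menu</nav><p>Hello world</p></html>", "html.parser")
#   ->  "Hello world"   (the <nav> element is decomposed before the text is extracted)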


def clean_string(text):
    """
    This function takes in a string and performs a series of text cleaning operations.

    Args:
        text (str): The text to be cleaned. This is expected to be a string.

    Returns:
        cleaned_text (str): The cleaned text after all the cleaning operations
        have been performed.
    """
    # Replacement of newline characters:
    text = text.replace("\n", " ")

    # Stripping and reducing multiple spaces to single:
    cleaned_text = re.sub(r"\s+", " ", text.strip())

    # Removing backslashes:
    cleaned_text = cleaned_text.replace("\\", "")

    # Replacing hash characters:
    cleaned_text = cleaned_text.replace("#", " ")

    # Eliminating consecutive non-alphanumeric characters:
    # This regex identifies consecutive non-alphanumeric characters (i.e., not
    # a word character [a-zA-Z0-9_] and not whitespace) in the string
    # and replaces each group of such characters with a single occurrence of
    # that character.
    # For example, "!!! hello !!!" would become "! hello !".
    cleaned_text = re.sub(r"([^\w\s])\1*", r"\1", cleaned_text)

    return cleaned_text
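
# Illustrative example (a sketch, not part of the original module; the input string is assumed):
#   clean_string("Hello\n   world!!!")  ->  "Hello world!"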


def is_readable(s):
    """
    Heuristic to determine if a string is "readable" (mostly contains printable characters and forms meaningful words)

    :param s: string
    :return: True if the string is more than 95% printable.
    """
    try:
        printable_ratio = sum(c in string.printable for c in s) / len(s)
    except ZeroDivisionError:
        logging.warning("Empty string processed as unreadable")
        printable_ratio = 0
    return printable_ratio > 0.95  # 95% of characters are printable
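
# Illustrative example (a sketch, not part of the original module):
#   is_readable("plain ASCII text")  ->  True
#   is_readable("")                  ->  False  (a warning is logged)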


def use_pysqlite3():
    """
    Swap std-lib sqlite3 with pysqlite3.
    """
    import platform
    import sqlite3

    if platform.system() == "Linux" and sqlite3.sqlite_version_info < (3, 35, 0):
        try:
            # According to the Chroma team, this patch only works on Linux
            import datetime
            import subprocess
            import sys

            subprocess.check_call(
                [sys.executable, "-m", "pip", "install", "pysqlite3-binary", "--quiet", "--disable-pip-version-check"]
            )

            __import__("pysqlite3")
            sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")

            # Let the user know what happened.
            current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
            print(
                f"{current_time} [embedchain] [INFO]",
                "Swapped std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.",
                f"Your original version was {sqlite3.sqlite_version}.",
            )
        except Exception as e:
            # Escape all exceptions
            current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
            print(
                f"{current_time} [embedchain] [ERROR]",
                "Failed to swap std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.",
                "Error:",
                e,
            )
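
# Typical usage (an assumption, not stated in the original module): call use_pysqlite3()
# before importing chromadb, so the patched sqlite3 module is already registered in sys.modules:
#   use_pysqlite3()
#   import chromadb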


def format_source(source: str, limit: int = 20) -> str:
    """
    Format a string to only take the first `limit` and last `limit` characters.

    This makes it easier to display a URL, keeping familiarity while ensuring a consistent length.
    If the string is too short, it is not sliced.
    """
    if len(source) > 2 * limit:
        return source[:limit] + "..." + source[-limit:]
    return source
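
# Illustrative example (a sketch, not part of the original module; the URL is assumed):
#   format_source("https://example.com/a/very/long/path/to/some/page", limit=10)
#   ->  "https://ex.../some/page"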


def detect_datatype(source: Any) -> DataType:
    """
    Automatically detect the datatype of the given source.

    :param source: the source to base the detection on
    :return: data_type string
    """
    from urllib.parse import urlparse

    import requests
    import yaml

    def is_openapi_yaml(yaml_content):
        # currently the following two fields are required in openapi spec yaml config
        return "openapi" in yaml_content and "info" in yaml_content

    try:
        if not isinstance(source, str):
            raise ValueError("Source is not a string and thus cannot be a URL.")
        url = urlparse(source)
        # Check if both scheme and netloc are present. Local file system URIs are acceptable too.
        if not all([url.scheme, url.netloc]) and url.scheme != "file":
            raise ValueError("Not a valid URL.")
    except ValueError:
        url = False

    formatted_source = format_source(str(source), 30)

    if url:
        from langchain.document_loaders.youtube import ALLOWED_NETLOCK as YOUTUBE_ALLOWED_NETLOCS

        if url.netloc in YOUTUBE_ALLOWED_NETLOCS:
            logging.debug(f"Source of `{formatted_source}` detected as `youtube_video`.")
            return DataType.YOUTUBE_VIDEO

        if url.netloc in {"notion.so", "notion.site"}:
            logging.debug(f"Source of `{formatted_source}` detected as `notion`.")
            return DataType.NOTION

        if url.path.endswith(".pdf"):
            logging.debug(f"Source of `{formatted_source}` detected as `pdf_file`.")
            return DataType.PDF_FILE

        if url.path.endswith(".xml"):
            logging.debug(f"Source of `{formatted_source}` detected as `sitemap`.")
            return DataType.SITEMAP

        if url.path.endswith(".csv"):
            logging.debug(f"Source of `{formatted_source}` detected as `csv`.")
            return DataType.CSV

        if url.path.endswith(".docx"):
            logging.debug(f"Source of `{formatted_source}` detected as `docx`.")
            return DataType.DOCX

        if url.path.endswith(".yaml"):
            try:
                response = requests.get(source)
                response.raise_for_status()
                try:
                    yaml_content = yaml.safe_load(response.text)
                except yaml.YAMLError as exc:
                    logging.error(f"Error parsing YAML: {exc}")
                    raise TypeError(f"Not a valid data type. Error loading YAML: {exc}")

                if is_openapi_yaml(yaml_content):
                    logging.debug(f"Source of `{formatted_source}` detected as `openapi`.")
                    return DataType.OPENAPI
                else:
                    logging.error(
                        f"Source of `{formatted_source}` does not contain all the required "
                        "fields of OpenAPI yaml. Check 'https://spec.openapis.org/oas/v3.1.0'"
                    )
                    raise TypeError(
                        "Not a valid data type. Check 'https://spec.openapis.org/oas/v3.1.0', "
                        "make sure you have all the required fields in YAML config data"
                    )
            except requests.exceptions.RequestException as e:
                logging.error(f"Error fetching URL {formatted_source}: {e}")

        if url.path.endswith(".json"):
            logging.debug(f"Source of `{formatted_source}` detected as `json_file`.")
            return DataType.JSON

        if "docs" in url.netloc or ("docs" in url.path and url.scheme != "file"):
            # `docs_site` detection via path is not accepted for local filesystem URIs,
            # because that would mean all paths that contain `docs` are now doc sites, which is too aggressive.
            logging.debug(f"Source of `{formatted_source}` detected as `docs_site`.")
            return DataType.DOCS_SITE

        if "github.com" in url.netloc:
            logging.debug(f"Source of `{formatted_source}` detected as `github`.")
            return DataType.GITHUB

        # If none of the above conditions are met, it's a general web page.
        logging.debug(f"Source of `{formatted_source}` detected as `web_page`.")
        return DataType.WEB_PAGE

    elif not isinstance(source, str):
        # For datatypes where source is not a string.
        if isinstance(source, tuple) and len(source) == 2 and isinstance(source[0], str) and isinstance(source[1], str):
            logging.debug(f"Source of `{formatted_source}` detected as `qna_pair`.")
            return DataType.QNA_PAIR

        # Raise an error if it isn't a string and also not a valid non-string type (one of the previous).
        # We could stringify it, but it is better to raise an error and let the user decide how they want to do that.
        raise TypeError(
            "Source is not a string and a valid non-string type could not be detected. If you want to embed it, please stringify it, for instance by using `str(source)` or `(', ').join(source)`."  # noqa: E501
        )

    elif os.path.isfile(source):
        # For datatypes that support conventional file references.
        # Note: checking for string is not necessary anymore.
        if source.endswith(".docx"):
            logging.debug(f"Source of `{formatted_source}` detected as `docx`.")
            return DataType.DOCX

        if source.endswith(".csv"):
            logging.debug(f"Source of `{formatted_source}` detected as `csv`.")
            return DataType.CSV

        if source.endswith(".xml"):
            logging.debug(f"Source of `{formatted_source}` detected as `xml`.")
            return DataType.XML

        if source.endswith(".yaml"):
            with open(source, "r") as file:
                yaml_content = yaml.safe_load(file)

            if is_openapi_yaml(yaml_content):
                logging.debug(f"Source of `{formatted_source}` detected as `openapi`.")
                return DataType.OPENAPI
            else:
                logging.error(
                    f"Source of `{formatted_source}` does not contain all the required "
                    "fields of OpenAPI yaml. Check 'https://spec.openapis.org/oas/v3.1.0'"
                )
                raise ValueError(
                    "Invalid YAML data. Check 'https://spec.openapis.org/oas/v3.1.0', "
                    "make sure to add all the required params"
                )

        if source.endswith(".json"):
            logging.debug(f"Source of `{formatted_source}` detected as `json`.")
            return DataType.JSON

        # If the source is a valid file that is not detectable as a type, an error is raised.
        # It does not fall back to text.
        raise ValueError(
            "Source points to a valid file, but based on the filename, no `data_type` can be detected. Please be aware, that not all data_types allow conventional file references, some require the use of the `file URI scheme`. Please refer to the embedchain documentation (https://docs.embedchain.ai/advanced/data_types#remote-data-types)."  # noqa: E501
        )

    else:
        # Source is not a URL.

        # TODO: check if source is gmail query

        # check if the source is a valid json string
        if is_valid_json_string(source):
            logging.debug(f"Source of `{formatted_source}` detected as `json`.")
            return DataType.JSON

        # Use text as final fallback.
        logging.debug(f"Source of `{formatted_source}` detected as `text`.")
        return DataType.TEXT
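
# Illustrative examples (a sketch, not part of the original module; the inputs are assumed):
#   detect_datatype("https://example.com/report.pdf")  ->  DataType.PDF_FILE
#   detect_datatype("https://example.com/data.csv")    ->  DataType.CSV
#   detect_datatype("Just some plain text")            ->  DataType.TEXT  (final fallback)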


# check if the source is a valid json string
def is_valid_json_string(source: str):
    try:
        _ = json.loads(source)
        return True
    except json.JSONDecodeError:
        logging.error(
            "Insert valid string format of JSON. "
            "Check the docs to see the supported formats - `https://docs.embedchain.ai/data-sources/json`"
        )
        return False
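
# Illustrative example (a sketch, not part of the original module):
#   is_valid_json_string('{"a": 1}')   ->  True
#   is_valid_json_string("not json")   ->  False  (an error is logged)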


def validate_yaml_config(config_data):
    """
    Validate a config dict (typically loaded from YAML) against the supported
    app/llm/vectordb/embedder/chunker schema.
    """
    schema = Schema(
        {
            Optional("app"): {
                Optional("config"): {
                    Optional("id"): str,
                    Optional("name"): str,
                    Optional("log_level"): Or("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
                    Optional("collect_metrics"): bool,
                    Optional("collection_name"): str,
                }
            },
            Optional("llm"): {
                Optional("provider"): Or(
                    "openai",
                    "azure_openai",
                    "anthropic",
                    "huggingface",
                    "cohere",
                    "gpt4all",
                    "jina",
                    "llama2",
                    "vertexai",
                ),
                Optional("config"): {
                    Optional("model"): str,
                    Optional("number_documents"): int,
                    Optional("temperature"): float,
                    Optional("max_tokens"): int,
                    Optional("top_p"): Or(float, int),
                    Optional("stream"): bool,
                    Optional("template"): str,
                    Optional("system_prompt"): str,
                    Optional("deployment_name"): str,
                    Optional("where"): dict,
                    Optional("query_type"): str,
                },
            },
            Optional("vectordb"): {
                Optional("provider"): Or(
                    "chroma", "elasticsearch", "opensearch", "pinecone", "qdrant", "weaviate", "zilliz"
                ),
                Optional("config"): object,  # TODO: add particular config schema for each provider
            },
            Optional("embedder"): {
                Optional("provider"): Or("openai", "gpt4all", "huggingface", "vertexai", "azure_openai"),
                Optional("config"): {
                    Optional("model"): Optional(str),
                    Optional("deployment_name"): Optional(str),
                },
            },
            Optional("embedding_model"): {
                Optional("provider"): Or("openai", "gpt4all", "huggingface", "vertexai", "azure_openai"),
                Optional("config"): {
                    Optional("model"): str,
                    Optional("deployment_name"): str,
                },
            },
            Optional("chunker"): {
                Optional("chunk_size"): int,
                Optional("chunk_overlap"): int,
                Optional("length_function"): str,
            },
        }
    )
    return schema.validate(config_data)
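
# Illustrative example (a sketch, not part of the original module; the config values are assumed):
#   validate_yaml_config({"llm": {"provider": "openai", "config": {"temperature": 0.5}}})
#   returns the validated dict, or raises schema.SchemaError if the data does not match the schema.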