[Refactor] Update dependencies and loaders (#1062)

Sidharth Mohanty 1 year ago
Parent
Commit
aee5bbb44b

+ 123 - 104
embedchain/loaders/gmail.py

@@ -1,123 +1,142 @@
+import base64
 import hashlib
 import logging
 import os
-import quopri
+from email import message_from_bytes
+from email.utils import parsedate_to_datetime
 from textwrap import dedent
+from typing import Dict, List, Optional
 
 from bs4 import BeautifulSoup
 
 try:
-    from llama_hub.gmail.base import GmailReader
+    from google.auth.transport.requests import Request
+    from google.oauth2.credentials import Credentials
+    from google_auth_oauthlib.flow import InstalledAppFlow
+    from googleapiclient.discovery import build
 except ImportError:
-    raise ImportError("Gmail requires extra dependencies. Install with `pip install embedchain[gmail]`") from None
+    raise ImportError(
+        'Gmail requires extra dependencies. Install with `pip install --upgrade "embedchain[gmail]"`'
+    ) from None
 
 from embedchain.loaders.base_loader import BaseLoader
 from embedchain.utils import clean_string
 
 
-def get_header(text: str, header: str) -> str:
-    start_string_position = text.find(header)
-    pos_start = text.find(":", start_string_position) + 1
-    pos_end = text.find("\n", pos_start)
-    header = text[pos_start:pos_end]
-    return header.strip()
+class GmailReader:
+    SCOPES = ["https://www.googleapis.com/auth/gmail.readonly"]
+
+    def __init__(self, query: str, service=None, results_per_page: int = 10):
+        self.query = query
+        self.service = service or self._initialize_service()
+        self.results_per_page = results_per_page
+
+    @staticmethod
+    def _initialize_service():
+        credentials = GmailReader._get_credentials()
+        return build("gmail", "v1", credentials=credentials)
+
+    @staticmethod
+    def _get_credentials():
+        if not os.path.exists("credentials.json"):
+            raise FileNotFoundError("Missing 'credentials.json'. Download it from your Google Developer account.")
+
+        creds = (
+            Credentials.from_authorized_user_file("token.json", GmailReader.SCOPES)
+            if os.path.exists("token.json")
+            else None
+        )
+
+        if not creds or not creds.valid:
+            if creds and creds.expired and creds.refresh_token:
+                creds.refresh(Request())
+            else:
+                flow = InstalledAppFlow.from_client_secrets_file("credentials.json", GmailReader.SCOPES)
+                creds = flow.run_local_server(port=8080)
+            with open("token.json", "w") as token:
+                token.write(creds.to_json())
+        return creds
+
+    def load_emails(self) -> List[Dict]:
+        response = self.service.users().messages().list(userId="me", q=self.query).execute()
+        messages = response.get("messages", [])
+
+        return [self._parse_email(self._get_email(message["id"])) for message in messages]
+
+    def _get_email(self, message_id: str):
+        raw_message = self.service.users().messages().get(userId="me", id=message_id, format="raw").execute()
+        return base64.urlsafe_b64decode(raw_message["raw"])
+
+    def _parse_email(self, raw_email) -> Dict:
+        mime_msg = message_from_bytes(raw_email)
+        return {
+            "subject": self._get_header(mime_msg, "Subject"),
+            "from": self._get_header(mime_msg, "From"),
+            "to": self._get_header(mime_msg, "To"),
+            "date": self._format_date(mime_msg),
+            "body": self._get_body(mime_msg),
+        }
+
+    @staticmethod
+    def _get_header(mime_msg, header_name: str) -> str:
+        return mime_msg.get(header_name, "")
+
+    @staticmethod
+    def _format_date(mime_msg) -> Optional[str]:
+        date_header = GmailReader._get_header(mime_msg, "Date")
+        return parsedate_to_datetime(date_header).isoformat() if date_header else None
+
+    @staticmethod
+    def _get_body(mime_msg) -> str:
+        def decode_payload(part):
+            charset = part.get_content_charset() or "utf-8"
+            try:
+                return part.get_payload(decode=True).decode(charset)
+            except UnicodeDecodeError:
+                return part.get_payload(decode=True).decode(charset, errors="replace")
+
+        if mime_msg.is_multipart():
+            for part in mime_msg.walk():
+                ctype = part.get_content_type()
+                cdispo = str(part.get("Content-Disposition"))
+
+                if ctype == "text/plain" and "attachment" not in cdispo:
+                    return decode_payload(part)
+                elif ctype == "text/html":
+                    return decode_payload(part)
+        else:
+            return decode_payload(mime_msg)
+
+        return ""
 
 
 class GmailLoader(BaseLoader):
-    def load_data(self, query):
-        """Load data from gmail."""
-        if not os.path.isfile("credentials.json"):
-            raise FileNotFoundError(
-                "You must download the valid credentials file from your google \
-                dev account. Refer this `https://cloud.google.com/docs/authentication/api-keys`"
-            )
-
-        loader = GmailReader(query=query, service=None, results_per_page=20)
-        documents = loader.load_data()
-        logging.info(f"Gmail Loader: {len(documents)} mails found for query- {query}")
+    def load_data(self, query: str):
+        reader = GmailReader(query=query)
+        emails = reader.load_emails()
+        logging.info(f"Gmail Loader: {len(emails)} emails found for query '{query}'")
 
         data = []
-        data_contents = []
-        logging.info(f"Gmail Loader: {len(documents)} mails found")
-        for document in documents:
-            original_size = len(document.text)
-
-            snippet = document.metadata.get("snippet")
-            meta_data = {
-                "url": document.metadata.get("id"),
-                "date": get_header(document.text, "Date"),
-                "subject": get_header(document.text, "Subject"),
-                "from": get_header(document.text, "From"),
-                "to": get_header(document.text, "To"),
-                "search_query": query,
-            }
-
-            # Decode
-            decoded_bytes = quopri.decodestring(document.text)
-            decoded_str = decoded_bytes.decode("utf-8", errors="replace")
-
-            # Slice
-            mail_start = decoded_str.find("<!DOCTYPE")
-            email_data = decoded_str[mail_start:]
-
-            # Web Page HTML Processing
-            soup = BeautifulSoup(email_data, "html.parser")
-
-            tags_to_exclude = [
-                "nav",
-                "aside",
-                "form",
-                "header",
-                "noscript",
-                "svg",
-                "canvas",
-                "footer",
-                "script",
-                "style",
-            ]
-
-            for tag in soup(tags_to_exclude):
-                tag.decompose()
-
-            ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
-            for id in ids_to_exclude:
-                tags = soup.find_all(id=id)
-                for tag in tags:
-                    tag.decompose()
-
-            classes_to_exclude = [
-                "elementor-location-header",
-                "navbar-header",
-                "nav",
-                "header-sidebar-wrapper",
-                "blog-sidebar-wrapper",
-                "related-posts",
-            ]
-
-            for class_name in classes_to_exclude:
-                tags = soup.find_all(class_=class_name)
-                for tag in tags:
-                    tag.decompose()
-
-            content = soup.get_text()
-            content = clean_string(content)
-
-            cleaned_size = len(content)
-            if original_size != 0:
-                logging.info(
-                    f"[{id}] Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)"  # noqa:E501
-                )
-
-            result = f"""
-            email from '{meta_data.get('from')}' to '{meta_data.get('to')}'
-            subject: {meta_data.get('subject')}
-            date: {meta_data.get('date')}
-            preview: {snippet}
-            content: f{content}
-            """
-            data_content = dedent(result)
-            data.append({"content": data_content, "meta_data": meta_data})
-            data_contents.append(data_content)
-        doc_id = hashlib.sha256((query + ", ".join(data_contents)).encode()).hexdigest()
-        response_data = {"doc_id": doc_id, "data": data}
-        return response_data
+        for email in emails:
+            content = self._process_email(email)
+            data.append({"content": content, "meta_data": email})
+
+        return {"doc_id": self._generate_doc_id(query, data), "data": data}
+
+    @staticmethod
+    def _process_email(email: Dict) -> str:
+        content = BeautifulSoup(email["body"], "html.parser").get_text()
+        content = clean_string(content)
+        return dedent(
+            f"""
+            Email from '{email['from']}' to '{email['to']}'
+            Subject: {email['subject']}
+            Date: {email['date']}
+            Content: {content}
+        """
+        )
+
+    @staticmethod
+    def _generate_doc_id(query: str, data: List[Dict]) -> str:
+        content_strings = [email["content"] for email in data]
+        return hashlib.sha256((query + ", ".join(content_strings)).encode()).hexdigest()
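
For orientation, a minimal usage sketch of the refactored loader, assuming embedchain is installed with the gmail extra and a valid credentials.json sits in the working directory (the sender address and date in the query are illustrative, not part of this commit):

    from embedchain.loaders.gmail import GmailLoader

    loader = GmailLoader()
    # The query uses Gmail's own search syntax; it is passed straight through
    # to service.users().messages().list(userId="me", q=query).
    result = loader.load_data("from:noreply@github.com after:2023/12/01")
    print(result["doc_id"])  # sha256 over the query plus the joined email contents
    for item in result["data"]:
        print(item["meta_data"]["subject"])  # headers parsed from the raw MIME message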

+ 30 - 16
embedchain/loaders/json.py

@@ -2,29 +2,43 @@ import hashlib
 import json
 import os
 import re
+from typing import Dict, List, Union
 
 import requests
 
 from embedchain.loaders.base_loader import BaseLoader
 from embedchain.utils import clean_string, is_valid_json_string
 
-VALID_URL_PATTERN = "^https:\/\/[0-9A-z.]+.[0-9A-z.]+.[a-z]+\/.*\.json$"
 
+class JSONReader:
+    def __init__(self) -> None:
+        """Initialize the JSONReader."""
+        pass
+
+    def load_data(self, json_data: Union[Dict, str]) -> List[str]:
+        """Load data from a JSON structure.
+
+        Args:
+            json_data (Union[Dict, str]): The JSON data to load.
+
+        Returns:
+            List[str]: A list of strings representing the leaf nodes of the JSON.
+        """
+        if isinstance(json_data, str):
+            json_data = json.loads(json_data)
+        else:
+            json_data = json_data
+
+        json_output = json.dumps(json_data, indent=0)
+        lines = json_output.split("\n")
+        useful_lines = [line for line in lines if not re.match(r"^[{}\[\],]*$", line)]
+        return ["\n".join(useful_lines)]
 
-class JSONLoader(BaseLoader):
-    @staticmethod
-    def _get_llama_hub_loader():
-        try:
-            from llama_hub.jsondata.base import \
-                JSONDataReader as LLHUBJSONLoader
-        except ImportError as e:
-            raise Exception(
-                f"Failed to install required packages: {e}, \
-                install them using `pip install --upgrade 'embedchain[json]`"
-            )
 
-        return LLHUBJSONLoader()
+VALID_URL_PATTERN = "^https:\/\/[0-9A-z.]+.[0-9A-z.]+.[a-z]+\/.*\.json$"
+
 
+class JSONLoader(BaseLoader):
     @staticmethod
     def _check_content(content):
         if not isinstance(content, str):
@@ -40,14 +54,13 @@ class JSONLoader(BaseLoader):
         """Load a json file. Each data point is a key value pair."""
 
         JSONLoader._check_content(content)
-        loader = JSONLoader._get_llama_hub_loader()
+        loader = JSONReader()
 
         data = []
         data_content = []
 
         content_url_str = content
 
-        # Load json data from various sources.
         if os.path.isfile(content):
             with open(content, "r", encoding="utf-8") as json_file:
                 json_data = json.load(json_file)
@@ -68,7 +81,8 @@ class JSONLoader(BaseLoader):
 
         docs = loader.load_data(json_data)
         for doc in docs:
-            doc_content = clean_string(doc.text)
+            text = doc if isinstance(doc, str) else doc["text"]
+            doc_content = clean_string(text)
             data.append({"content": doc_content, "meta_data": {"url": content_url_str}})
             data_content.append(doc_content)
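
A quick sketch of what the new JSONReader produces (the sample payload is invented for illustration): it serializes the input with indent=0 so each token lands on its own line, then keeps only lines that carry values, dropping punctuation-only lines such as "{", "]", and "}":

    from embedchain.loaders.json import JSONReader

    reader = JSONReader()
    docs = reader.load_data({"title": "hello", "tags": ["a", "b"]})
    # load_data returns a single-element list of newline-joined value lines:
    print(docs[0])
    # "title": "hello",
    # "tags": [
    # "a",
    # "b"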
 

+ 85 - 13
embedchain/loaders/notion.py

@@ -1,39 +1,111 @@
 import hashlib
 import logging
 import os
+from typing import Any, Dict, List, Optional
 
-try:
-    from llama_hub.notion.base import NotionPageReader
-except ImportError:
-    raise ImportError(
-        "Notion requires extra dependencies. Install with `pip install --upgrade embedchain[community]`"
-    ) from None
-
+import requests
 
 from embedchain.helpers.json_serializable import register_deserializable
 from embedchain.loaders.base_loader import BaseLoader
 from embedchain.utils import clean_string
 
 
+class NotionDocument:
+    """
+    A simple Document class to hold the text and additional information of a page.
+    """
+
+    def __init__(self, text: str, extra_info: Dict[str, Any]):
+        self.text = text
+        self.extra_info = extra_info
+
+
+class NotionPageLoader:
+    """
+    Notion Page Loader.
+    Reads a set of Notion pages.
+    """
+
+    BLOCK_CHILD_URL_TMPL = "https://api.notion.com/v1/blocks/{block_id}/children"
+
+    def __init__(self, integration_token: Optional[str] = None) -> None:
+        """Initialize with Notion integration token."""
+        if integration_token is None:
+            integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
+            if integration_token is None:
+                raise ValueError(
+                    "Must specify `integration_token` or set environment " "variable `NOTION_INTEGRATION_TOKEN`."
+                )
+        self.token = integration_token
+        self.headers = {
+            "Authorization": "Bearer " + self.token,
+            "Content-Type": "application/json",
+            "Notion-Version": "2022-06-28",
+        }
+
+    def _read_block(self, block_id: str, num_tabs: int = 0) -> str:
+        """Read a block from Notion."""
+        done = False
+        result_lines_arr = []
+        cur_block_id = block_id
+        while not done:
+            block_url = self.BLOCK_CHILD_URL_TMPL.format(block_id=cur_block_id)
+            res = requests.get(block_url, headers=self.headers)
+            data = res.json()
+
+            for result in data["results"]:
+                result_type = result["type"]
+                result_obj = result[result_type]
+
+                cur_result_text_arr = []
+                if "rich_text" in result_obj:
+                    for rich_text in result_obj["rich_text"]:
+                        if "text" in rich_text:
+                            text = rich_text["text"]["content"]
+                            prefix = "\t" * num_tabs
+                            cur_result_text_arr.append(prefix + text)
+
+                result_block_id = result["id"]
+                has_children = result["has_children"]
+                if has_children:
+                    children_text = self._read_block(result_block_id, num_tabs=num_tabs + 1)
+                    cur_result_text_arr.append(children_text)
+
+                cur_result_text = "\n".join(cur_result_text_arr)
+                result_lines_arr.append(cur_result_text)
+
+            if data["next_cursor"] is None:
+                done = True
+            else:
+                cur_block_id = data["next_cursor"]
+
+        result_lines = "\n".join(result_lines_arr)
+        return result_lines
+
+    def load_data(self, page_ids: List[str]) -> List[NotionDocument]:
+        """Load data from the given list of page IDs."""
+        docs = []
+        for page_id in page_ids:
+            page_text = self._read_block(page_id)
+            docs.append(NotionDocument(text=page_text, extra_info={"page_id": page_id}))
+        return docs
+
+
 @register_deserializable
 class NotionLoader(BaseLoader):
     def load_data(self, source):
-        """Load data from a PDF file."""
+        """Load data from a Notion URL."""
 
-        # Reformat Id to match notion expectation
         id = source[-32:]
         formatted_id = f"{id[:8]}-{id[8:12]}-{id[12:16]}-{id[16:20]}-{id[20:]}"
         logging.debug(f"Extracted notion page id as: {formatted_id}")
 
-        # Get page through the notion api
         integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
-        reader = NotionPageReader(integration_token=integration_token)
+        reader = NotionPageLoader(integration_token=integration_token)
         documents = reader.load_data(page_ids=[formatted_id])
 
-        # Extract text
         raw_text = documents[0].text
 
-        # Clean text
         text = clean_string(raw_text)
         doc_id = hashlib.sha256((text + source).encode()).hexdigest()
         return {
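
An illustrative sketch of driving the replacement loader (the page URL and token below are made up): load_data takes the last 32 characters of the source URL as the page ID, and NotionPageLoader walks /v1/blocks/{id}/children, following next_cursor for pagination and recursing into blocks with has_children:

    import os

    from embedchain.loaders.notion import NotionLoader

    os.environ["NOTION_INTEGRATION_TOKEN"] = "secret-token"  # placeholder, not a real token
    loader = NotionLoader()
    result = loader.load_data("https://www.notion.so/Example-0123456789abcdef0123456789abcdef")
    print(result["doc_id"])  # sha256 of the cleaned page text plus the source URL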

+ 84 - 145
poetry.lock

@@ -231,27 +231,6 @@ files = [
     {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
 ]
 
-[[package]]
-name = "atlassian-python-api"
-version = "3.41.2"
-description = "Python Atlassian REST API Wrapper"
-optional = true
-python-versions = "*"
-files = [
-    {file = "atlassian-python-api-3.41.2.tar.gz", hash = "sha256:a2022977da5a395412ace8e29c2c541312f07d45fc750435dec036af53daceda"},
-    {file = "atlassian_python_api-3.41.2-py3-none-any.whl", hash = "sha256:27c2361a22ee8cc69988f67a591488cbfce09e5f284da000011af11944d2bc96"},
-]
-
-[package.dependencies]
-deprecated = "*"
-oauthlib = "*"
-requests = "*"
-requests-oauthlib = "*"
-six = "*"
-
-[package.extras]
-kerberos = ["requests-kerberos"]
-
 [[package]]
 name = "attrs"
 version = "23.1.0"
@@ -1816,13 +1795,13 @@ protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4
 
 [[package]]
 name = "google-api-core"
-version = "2.12.0"
+version = "2.15.0"
 description = "Google API client core library"
 optional = true
 python-versions = ">=3.7"
 files = [
-    {file = "google-api-core-2.12.0.tar.gz", hash = "sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553"},
-    {file = "google_api_core-2.12.0-py3-none-any.whl", hash = "sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160"},
+    {file = "google-api-core-2.15.0.tar.gz", hash = "sha256:abc978a72658f14a2df1e5e12532effe40f94f868f6e23d95133bd6abcca35ca"},
+    {file = "google_api_core-2.15.0-py3-none-any.whl", hash = "sha256:2aa56d2be495551e66bbff7f729b790546f87d5c90e74781aa77233bcb395a8a"},
 ]
 
 [package.dependencies]
@@ -1844,15 +1823,33 @@ grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status
 grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
 grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
 
+[[package]]
+name = "google-api-python-client"
+version = "2.111.0"
+description = "Google API Client Library for Python"
+optional = true
+python-versions = ">=3.7"
+files = [
+    {file = "google-api-python-client-2.111.0.tar.gz", hash = "sha256:3a45a53c031478d1c82c7162dd25c9a965247bca6bd438af0838a9d9b8219405"},
+    {file = "google_api_python_client-2.111.0-py2.py3-none-any.whl", hash = "sha256:b605adee2d09a843b97a59925757802904679e44e5599708cedb8939900dfbc7"},
+]
+
+[package.dependencies]
+google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0"
+google-auth = ">=1.19.0,<3.0.0.dev0"
+google-auth-httplib2 = ">=0.1.0"
+httplib2 = ">=0.15.0,<1.dev0"
+uritemplate = ">=3.0.1,<5"
+
 [[package]]
 name = "google-auth"
-version = "2.23.3"
+version = "2.25.2"
 description = "Google Authentication Library"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "google-auth-2.23.3.tar.gz", hash = "sha256:6864247895eea5d13b9c57c9e03abb49cb94ce2dc7c58e91cba3248c7477c9e3"},
-    {file = "google_auth-2.23.3-py2.py3-none-any.whl", hash = "sha256:a8f4608e65c244ead9e0538f181a96c6e11199ec114d41f1d7b1bffa96937bda"},
+    {file = "google-auth-2.25.2.tar.gz", hash = "sha256:42f707937feb4f5e5a39e6c4f343a17300a459aaf03141457ba505812841cc40"},
+    {file = "google_auth-2.25.2-py2.py3-none-any.whl", hash = "sha256:473a8dfd0135f75bb79d878436e568f2695dce456764bf3a02b6f8c540b1d256"},
 ]
 
 [package.dependencies]
@@ -1867,6 +1864,39 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
 reauth = ["pyu2f (>=0.1.5)"]
 requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
 
+[[package]]
+name = "google-auth-httplib2"
+version = "0.2.0"
+description = "Google Authentication Library: httplib2 transport"
+optional = true
+python-versions = "*"
+files = [
+    {file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"},
+    {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"},
+]
+
+[package.dependencies]
+google-auth = "*"
+httplib2 = ">=0.19.0"
+
+[[package]]
+name = "google-auth-oauthlib"
+version = "1.2.0"
+description = "Google Authentication Library"
+optional = true
+python-versions = ">=3.6"
+files = [
+    {file = "google-auth-oauthlib-1.2.0.tar.gz", hash = "sha256:292d2d3783349f2b0734a0a0207b1e1e322ac193c2c09d8f7c613fb7cc501ea8"},
+    {file = "google_auth_oauthlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:297c1ce4cb13a99b5834c74a1fe03252e1e499716718b190f56bcb9c4abc4faf"},
+]
+
+[package.dependencies]
+google-auth = ">=2.15.0"
+requests-oauthlib = ">=0.7.0"
+
+[package.extras]
+tool = ["click (>=6.0.0)"]
+
 [[package]]
 name = "google-cloud-aiplatform"
 version = "1.35.0"
@@ -2450,17 +2480,6 @@ files = [
     {file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"},
 ]
 
-[[package]]
-name = "html2text"
-version = "2020.1.16"
-description = "Turn HTML into equivalent Markdown-structured text."
-optional = true
-python-versions = ">=3.5"
-files = [
-    {file = "html2text-2020.1.16-py3-none-any.whl", hash = "sha256:c7c629882da0cf377d66f073329ccf34a12ed2adf0169b9285ae4e63ef54c82b"},
-    {file = "html2text-2020.1.16.tar.gz", hash = "sha256:e296318e16b059ddb97f7a8a1d6a5c1d7af4544049a01e261731d2d5cc277bbb"},
-]
-
 [[package]]
 name = "httpcore"
 version = "0.18.0"
@@ -2482,6 +2501,20 @@ sniffio = "==1.*"
 http2 = ["h2 (>=3,<5)"]
 socks = ["socksio (==1.*)"]
 
+[[package]]
+name = "httplib2"
+version = "0.22.0"
+description = "A comprehensive HTTP client library."
+optional = true
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+    {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"},
+    {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"},
+]
+
+[package.dependencies]
+pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""}
+
 [[package]]
 name = "httptools"
 version = "0.6.0"
@@ -3074,58 +3107,6 @@ files = [
     {file = "lit-17.0.2.tar.gz", hash = "sha256:d6a551eab550f81023c82a260cd484d63970d2be9fd7588111208e7d2ff62212"},
 ]
 
-[[package]]
-name = "llama-hub"
-version = "0.0.43"
-description = "A library of community-driven data loaders for LLMs. Use with LlamaIndex and/or LangChain. "
-optional = true
-python-versions = ">=3.8.1,<4.0"
-files = [
-    {file = "llama_hub-0.0.43-py3-none-any.whl", hash = "sha256:6dbf1261f75e97de7f086d1ca3258db39f530526382d347f9523fcbb472f72c9"},
-    {file = "llama_hub-0.0.43.tar.gz", hash = "sha256:82b4405d8f20f9538621d0324aaaeeba8945c0eca4cd8998d67c3140bb7cbd05"},
-]
-
-[package.dependencies]
-atlassian-python-api = "*"
-html2text = "*"
-llama-index = ">=0.6.9"
-psutil = "*"
-retrying = "*"
-
-[[package]]
-name = "llama-index"
-version = "0.8.65"
-description = "Interface between LLMs and your data"
-optional = true
-python-versions = ">=3.8.1,<3.12"
-files = [
-    {file = "llama_index-0.8.65-py3-none-any.whl", hash = "sha256:0bdb32a33e846b3b7517cb4a39c21768f1cf263b2b6a111e0405b2eabe6bac01"},
-    {file = "llama_index-0.8.65.tar.gz", hash = "sha256:826b824aba2ea4a14369f7840df3975d4e1af068bc9e8770d2d7f392db5eb873"},
-]
-
-[package.dependencies]
-aiostream = ">=0.5.2,<0.6.0"
-dataclasses-json = ">=0.5.7,<0.6.0"
-deprecated = ">=1.2.9.3"
-fsspec = ">=2023.5.0"
-langchain = ">=0.0.303"
-nest-asyncio = ">=1.5.8,<2.0.0"
-nltk = ">=3.8.1,<4.0.0"
-numpy = "*"
-openai = ">=1.1.0"
-pandas = "*"
-SQLAlchemy = {version = ">=1.4.49", extras = ["asyncio"]}
-tenacity = ">=8.2.0,<9.0.0"
-tiktoken = ">=0.3.3"
-typing-extensions = ">=4.5.0"
-typing-inspect = ">=0.8.0"
-urllib3 = "<2"
-
-[package.extras]
-local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.99,<0.2.0)", "transformers[torch] (>=4.34.0,<5.0.0)"]
-postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg-binary (>=3.1.12,<4.0.0)"]
-query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "lm-format-enforcer (>=0.4.3,<0.5.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "scikit-learn (<1.3.0)", "spacy (>=3.7.1,<4.0.0)"]
-
 [[package]]
 name = "loguru"
 version = "0.7.2"
@@ -3706,17 +3687,6 @@ dns-srv = ["dnspython (>=1.16.0,<=2.3.0)"]
 gssapi = ["gssapi (>=1.6.9,<=1.8.2)"]
 opentelemetry = ["Deprecated (>=1.2.6)", "typing-extensions (>=3.7.4)", "zipp (>=0.5)"]
 
-[[package]]
-name = "nest-asyncio"
-version = "1.5.8"
-description = "Patch asyncio to allow nested event loops"
-optional = true
-python-versions = ">=3.5"
-files = [
-    {file = "nest_asyncio-1.5.8-py3-none-any.whl", hash = "sha256:accda7a339a70599cb08f9dd09a67e0c2ef8d8d6f4c07f96ab203f2ae254e48d"},
-    {file = "nest_asyncio-1.5.8.tar.gz", hash = "sha256:25aa2ca0d2a5b5531956b9e273b45cf664cae2b145101d73b86b199978d48fdb"},
-]
-
 [[package]]
 name = "networkx"
 version = "3.1"
@@ -4694,32 +4664,6 @@ files = [
     {file = "protobuf-4.21.12.tar.gz", hash = "sha256:7cd532c4566d0e6feafecc1059d04c7915aec8e182d1cf7adee8b24ef1e2e6ab"},
 ]
 
-[[package]]
-name = "psutil"
-version = "5.9.5"
-description = "Cross-platform lib for process and system monitoring in Python."
-optional = true
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
-    {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"},
-    {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"},
-    {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"},
-    {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"},
-    {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"},
-    {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"},
-    {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"},
-    {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"},
-    {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"},
-    {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"},
-    {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"},
-    {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"},
-    {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"},
-    {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"},
-]
-
-[package.extras]
-test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
-
 [[package]]
 name = "psycopg"
 version = "3.1.12"
@@ -5941,20 +5885,6 @@ urllib3 = ">=1.25.10,<3.0"
 [package.extras]
 tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli", "tomli-w", "types-requests"]
 
-[[package]]
-name = "retrying"
-version = "1.3.4"
-description = "Retrying"
-optional = true
-python-versions = "*"
-files = [
-    {file = "retrying-1.3.4-py3-none-any.whl", hash = "sha256:8cc4d43cb8e1125e0ff3344e9de678fefd85db3b750b81b2240dc0183af37b35"},
-    {file = "retrying-1.3.4.tar.gz", hash = "sha256:345da8c5765bd982b1d1915deb9102fd3d1f7ad16bd84a9700b85f64d24e8f3e"},
-]
-
-[package.dependencies]
-six = ">=1.7.0"
-
 [[package]]
 name = "rich"
 version = "13.7.0"
@@ -7449,6 +7379,17 @@ files = [
 packaging = ">=21.3"
 Pillow = ">=8.0.0"
 
+[[package]]
+name = "uritemplate"
+version = "4.1.1"
+description = "Implementation of RFC 6570 URI Templates"
+optional = true
+python-versions = ">=3.6"
+files = [
+    {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"},
+    {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"},
+]
+
 [[package]]
 name = "urllib3"
 version = "1.26.17"
@@ -8089,17 +8030,15 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 
 [extras]
 cohere = ["cohere"]
-community = ["llama-hub"]
 dataloaders = ["docx2txt", "duckduckgo-search", "pytube", "sentence-transformers", "unstructured", "youtube-transcript-api"]
 discord = ["discord"]
 dropbox = ["dropbox"]
 elasticsearch = ["elasticsearch"]
 github = ["PyGithub", "gitpython"]
-gmail = ["llama-hub", "requests"]
+gmail = ["google-api-core", "google-api-python-client", "google-auth", "google-auth-httplib2", "google-auth-oauthlib", "requests"]
 google = ["google-generativeai"]
 huggingface-hub = ["huggingface_hub"]
 images = ["ftfy", "pillow", "regex", "torch", "torchvision"]
-json = ["llama-hub"]
 llama2 = ["replicate"]
 milvus = ["pymilvus"]
 modal = ["modal"]
@@ -8122,4 +8061,4 @@ youtube = ["youtube-transcript-api", "yt_dlp"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.9,<3.12"
-content-hash = "2d59f37483caac14b4c0e10331facdab174deba143480df53340756330214010"
+content-hash = "335c42c91a2b5e4a1c3d8a7c39dee8665fd1eee0410e1bc6cb6cb1d6f6722445"

+ 5 - 5
pyproject.toml

@@ -106,8 +106,6 @@ tiktoken = { version = "^0.4.0", optional = true }
 youtube-transcript-api = { version = "^0.6.1", optional = true }
 pytube = { version = "^15.0.0", optional = true }
 duckduckgo-search = { version = "^3.8.5", optional = true }
-llama-hub = { version = "^0.0.43", optional = true }
-llama-index = { version = "^0.8.65", optional = true }
 sentence-transformers = { version = "^2.2.2", optional = true }
 torch = { version = "2.0.0", optional = true }
 # Torch 2.0.1 is not compatible with poetry (https://github.com/pytorch/pytorch/issues/100974)
@@ -149,6 +147,11 @@ listparser = { version = "^0.19", optional = true }
 google-generativeai = { version = "^0.3.0", optional = true }
 modal = { version = "^0.56.4329", optional = true }
 dropbox = { version = "^11.36.2", optional = true }
+google-api-python-client = { version = "^2.111.0", optional = true }
+google-auth-oauthlib = { version = "^1.2.0", optional = true }
+google-auth = { version = "^2.25.2", optional = true }
+google-auth-httplib2 = { version = "^0.2.0", optional = true }
+google-api-core = { version = "^2.15.0", optional = true }
 
 [tool.poetry.group.dev.dependencies]
 black = "^23.3.0"
@@ -166,7 +169,6 @@ pytest-asyncio = "^0.21.1"
 
 [tool.poetry.extras]
 streamlit = ["streamlit"]
-community = ["llama-hub"]
 opensource = ["sentence-transformers", "torch", "gpt4all"]
 elasticsearch = ["elasticsearch"]
 opensearch = ["opensearch-py"]
@@ -193,7 +195,6 @@ dataloaders=[
 vertexai = ["google-cloud-aiplatform"]
 llama2 = ["replicate"]
 gmail = [
-    "llama-hub",
     "requests",
     "google-api-python-client",
     "google-auth",
@@ -201,7 +202,6 @@ gmail = [
     "google-auth-httplib2",
     "google-api-core",
 ]
-json = ["llama-hub"]
 postgres = ["psycopg", "psycopg-binary", "psycopg-pool"]
 mysql = ["mysql-connector-python"]
 github = ["PyGithub", "gitpython"]
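
Net effect of the extras changes: the community and json extras, which existed only for llama-hub, are gone, and the gmail extra now pulls the Google client stack (google-api-python-client, google-auth, google-auth-oauthlib, google-auth-httplib2, google-api-core) directly, matching the install command suggested by the new ImportError message, pip install --upgrade "embedchain[gmail]".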

+ 7 - 8
tests/loaders/test_gmail.py

@@ -1,21 +1,15 @@
 import pytest
-from llama_hub.readwise.base import Document
 
 from embedchain.loaders.gmail import GmailLoader
 
 
-@pytest.fixture
-def mock_quopri(mocker):
-    return mocker.patch("embedchain.loaders.gmail.quopri.decodestring", return_value=b"your_test_decoded_string")
-
-
 @pytest.fixture
 def mock_beautifulsoup(mocker):
     return mocker.patch("embedchain.loaders.gmail.BeautifulSoup", return_value=mocker.MagicMock())
 
 
 @pytest.fixture
-def gmail_loader(mock_quopri, mock_beautifulsoup):
+def gmail_loader(mock_beautifulsoup):
     return GmailLoader()
 
 
@@ -33,7 +27,12 @@ def test_load_data(gmail_loader, mocker):
         "id": "your_test_id",
         "snippet": "your_test_snippet",
     }
-    mock_gmail_reader_instance.load_data.return_value = [Document(text=text, extra_info=metadata)]
+    mock_gmail_reader_instance.load_data.return_value = [
+        {
+            "text": text,
+            "extra_info": metadata,
+        }
+    ]
 
     with mocker.patch("os.path.isfile", return_value=True):
         response_data = gmail_loader.load_data("your_query")

+ 18 - 5
tests/loaders/test_json.py

@@ -1,7 +1,6 @@
 import hashlib
 
 import pytest
-from llama_index.readers.schema.base import Document
 
 from embedchain.loaders.json import JSONLoader
 
@@ -42,8 +41,15 @@ def test_load_data_url(mocker):
 
     mocker.patch("os.path.isfile", return_value=False)
     mocker.patch(
-        "llama_hub.jsondata.base.JSONDataReader.load_data",
-        return_value=[Document(text="content1"), Document(text="content2")],
+        "embedchain.loaders.json.JSONReader.load_data",
+        return_value=[
+            {
+                "text": "content1",
+            },
+            {
+                "text": "content2",
+            },
+        ],
     )
 
     mock_response = mocker.Mock()
@@ -98,8 +104,15 @@ def test_load_data_from_json_string(mocker):
 
     mocker.patch("os.path.isfile", return_value=False)
     mocker.patch(
-        "llama_hub.jsondata.base.JSONDataReader.load_data",
-        return_value=[Document(text="content1"), Document(text="content2")],
+        "embedchain.loaders.json.JSONReader.load_data",
+        return_value=[
+            {
+                "text": "content1",
+            },
+            {
+                "text": "content2",
+            },
+        ],
     )
 
     result = JSONLoader.load_data(content)

+ 1 - 1
tests/loaders/test_notion.py

@@ -28,7 +28,7 @@ def test_load_data(notion_loader):
     mock_page.text = mock_text
     mock_documents = [mock_page]
 
-    with patch("embedchain.loaders.notion.NotionPageReader") as mock_reader:
+    with patch("embedchain.loaders.notion.NotionPageLoader") as mock_reader:
         mock_reader.return_value.load_data.return_value = mock_documents
         result = notion_loader.load_data(source)