# -*- coding: utf-8 -*-
# @Author: privacy
# @Date: 2024-06-11 13:43:14
# @Last Modified by: privacy
# @Last Modified time: 2024-07-04 09:59:10

# --- Legacy approach 1: extract images with PyPDF2 (kept for reference) ---
# Note: `pdf_path` and `output_dir` must be defined before running this block.
# import os
# from PIL import Image
# from PyPDF2 import PdfReader
#
# # Read the PDF file
# with open(pdf_path, 'rb') as file:
#     reader = PdfReader(file)
#     num_pages = len(reader.pages)
#     # Walk every page of the PDF
#     for page_num in range(num_pages):
#         page = reader.pages[page_num]
#         # Pull the images out of the page's resources
#         if '/XObject' in page['/Resources']:
#             xobjects = page['/Resources']['/XObject'].get_object()
#             for obj in xobjects:
#                 if xobjects[obj]['/Subtype'] == '/Image':
#                     size = (xobjects[obj]['/Width'], xobjects[obj]['/Height'])
#                     data = xobjects[obj].get_data()
#                     if xobjects[obj]['/ColorSpace'] == '/DeviceRGB':
#                         mode = "RGB"
#                     else:
#                         mode = "P"
#                     img = Image.frombytes(mode, size, data)
#                     img_path = os.path.join(output_dir, f'image_{page_num}_{obj}.png')
#                     img.save(img_path)
#                     print(f'Image saved: {img_path}')
#######################################################################
# --- Legacy approach 2: extract images with PyMuPDF/fitz (kept for reference) ---
# Note: this relies on private APIs (_getXrefLength/_getXrefString/writePNG)
# that newer PyMuPDF releases expose as xref_length()/xref_object()/Pixmap.save().
# import os
# import re
# import fitz
#
# def pdf2pic(path, save_path):
#     checkXO = r"/Type(?= */XObject)"
#     checkIM = r"/Subtype(?= */Image)"
#     pdf = fitz.open(path)
#     lenXREF = pdf._getXrefLength()
#     imgcount = 0
#     for i in range(1, lenXREF):
#         text = pdf._getXrefString(i)
#         isXObject = re.search(checkXO, text)
#         isImage = re.search(checkIM, text)
#         if not isXObject or not isImage:
#             continue
#         imgcount += 1
#         pix = fitz.Pixmap(pdf, i)
#         new_name = f"img_{imgcount}.png"
#         if pix.n < 5:  # GRAY/RGB pixmaps can be written directly
#             pix.writePNG(os.path.join(save_path, new_name))
#         else:  # CMYK: convert to RGB first
#             pix0 = fitz.Pixmap(fitz.csRGB, pix)
#             pix0.writePNG(os.path.join(save_path, new_name))
#             pix0 = None
#         pix = None
#
# if __name__ == '__main__':
#     pdf2pic(pdf_path, image_dir)
#######################################################################

# Standard library imports
import os
import re
import json
from io import BytesIO
from pprint import pprint
from typing import Optional

# Third-party imports
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTRect, LTTextBoxHorizontal, LTLine, LTFigure, LTCurve, LTImage, LTChar
from pdfminer.pdfcolor import LITERAL_DEVICE_CMYK
from pdfminer.pdfcolor import LITERAL_DEVICE_GRAY
from pdfminer.pdfcolor import LITERAL_DEVICE_RGB
from pdfminer.pdftypes import (
    LITERALS_DCT_DECODE,
    LITERALS_JBIG2_DECODE,
    LITERALS_JPX_DECODE,
    LITERALS_FLATE_DECODE,
)
from pdfminer.pdfparser import PDFParser, PDFSyntaxError
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.image import BMPWriter

import pdfplumber

# Local imports
from tools import RefPageNumberResolver

# Cell values that identify a table header row in bid/tender documents
HEADERS = {'序号', '项目编码', '项目名称', '项目特征', '单位', '工程量', '全费用综合单价', '合价', '备注', '主材名称', '规格型号', '不低于下列同档次品牌', '投标选用品牌及规格型号', '名称', '事项', '数量', '含税单价(元)', '含税合价(元)', '条款号', '评分因素', '评分标准', '页码'}

PIL_ERROR_MESSAGE = "Pillow (PIL) is required for this image format; install it with `pip install Pillow`."


def load_json(data_path: str):
    """Load a JSON file, returning None (with a message) on any failure."""
    try:
        with open(data_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"Error: The file '{data_path}' was not found.")
        return None
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON from '{data_path}': {e}")
        return None
    except Exception as e:
        print(f"Error loading JSON from '{data_path}': {e}")
        return None


def is_title(line: str) -> bool:
    """Return True if the given line of text looks like a heading.

    Matches any of:
    - a Chinese numeral wrapped in parentheses, e.g. "(一)"
    - a single digit followed by a dot, e.g. "1."
    - a two-digit number 10-29 followed by a dot, e.g. "12."
    - "第" + Chinese/Arabic numerals + "章/节/条", e.g. "第三章"
    - Chinese numerals followed by "、", "要" or "是", e.g. "三、"
    """
    title_word = re.findall(r'^[(\(][一二三四五六七八九十]+[\))]|^\d\.|^1\d\.|^2\d\.|^[第][一二三四五六七八九十\d]+[章节条]|[一二三四五六七八九十]+[、要是]', line.strip())
    if title_word:
        return True
    # Fallback: appendix / references / attached-table headings also count
    title_word = re.findall(r'^附录|^参考文献|^附表', line.strip())
    if title_word:
        return True
    return False
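
# Minimal sanity check for is_title(); the sample lines below are illustrative
# assumptions, not drawn from any real tender document. _demo_is_title() is
# not invoked anywhere in this module - call it manually if needed.
def _demo_is_title() -> None:
    samples = {
        '(一)总则': True,        # bracketed Chinese numeral
        '12.工程概况': True,      # two-digit Arabic numeral + dot
        '第三章 评标办法': True,  # "第...章" chapter heading
        '三、评标办法': True,     # Chinese numeral + enumeration comma
        '这是普通正文。': False,  # ordinary body text
    }
    for line, expected in samples.items():
        assert is_title(line) == expected, line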

def export_image(image: LTImage, path: str) -> Optional[str]:
    """Save an LTImage to disk and return the path it was written to.

    Returns None when the stream carries no usable data.
    """
    (width, height) = image.srcsize

    # Check the stream's encoding first: DCT -> JPEG, JPX -> JPEG 2000
    filters = image.stream.get_filters()
    if len(filters) == 1 and filters[0][0] in LITERALS_DCT_DECODE:
        return _save_jpeg(image, path)
    elif len(filters) == 1 and filters[0][0] in LITERALS_JPX_DECODE:
        return _save_jpeg2000(image, path)
    # elif image.bits == 1:
    #     name = _save_bmp(image, width, height, (width + 7) // 8, image.bits, path)
    # elif image.bits == 8 and LITERAL_DEVICE_RGB in image.colorspace:
    #     name = _save_bmp(image, width, height, width * 3, image.bits * 3, path)
    # elif image.bits == 8 and LITERAL_DEVICE_GRAY in image.colorspace:
    #     name = _save_bmp(image, width, height, width, image.bits, path)
    # elif len(filters) == 1 and filters[0][0] in LITERALS_FLATE_DECODE:
    #     name = _save_bytes(image)
    # else:
    #     name = _save_raw(image)

    # Decoded stream data and the raw (undecoded) stream data
    data = image.stream.get_data()
    raw_data = image.stream.get_rawdata()

    # If decoded data exists, sniff the file type from its magic bytes
    if data:
        if data[:2] == b'\xff\xd8' and data[-2:] == b'\xff\xd9':
            path += '.jpg'
        elif data[:8] == b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a':
            path += '.png'
        elif data[:2] == b'\x42\x4d':
            path += '.bmp'
        elif data[:6] == b'\x47\x49\x46\x38\x37\x61' or data[:6] == b'\x47\x49\x46\x38\x39\x61':
            path += '.gif'
        elif data[:2] == b'\x4d\x4d' or data[:2] == b'\x49\x49':
            path += '.tiff'
        elif data[:8] == b'\xffO\xffQ\x00/\x00\x00':
            return _save_j2k(image, path)
        else:
            path += '.unk'
        if os.path.exists(path):
            return path
        with open(path, 'wb') as file:
            file.write(data)
        return path
    # Otherwise fall back to the raw data and sniff its magic bytes
    elif raw_data:
        if raw_data[:2] == b'\xff\xd8' and raw_data[-2:] == b'\xff\xd9':
            path += '.jpg'
        elif raw_data[:8] == b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a':
            path += '.png'
        elif raw_data[:2] == b'\x42\x4d':
            path += '.bmp'
        elif raw_data[:6] == b'\x47\x49\x46\x38\x37\x61' or raw_data[:6] == b'\x47\x49\x46\x38\x39\x61':
            path += '.gif'
        elif raw_data[:2] == b'\x4d\x4d' or raw_data[:2] == b'\x49\x49':
            path += '.tiff'
        elif raw_data[:8] == b'\xffO\xffQ\x00/\x00\x00':
            return _save_j2k(image, path)
        else:
            path += '.unk'
        if os.path.exists(path):
            return path
        with open(path, 'wb') as file:
            file.write(raw_data)
        return path
    # Neither form of data is available
    else:
        return None
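
# Table-driven restatement of the magic-byte checks inside export_image - an
# illustrative sketch only (nothing in this module calls it); the signature
# list mirrors the branches above and can be extended as needed.
_IMAGE_SIGNATURES = [
    (b'\xff\xd8', '.jpg'),           # JPEG (SOI marker)
    (b'\x89PNG\r\n\x1a\n', '.png'),  # PNG
    (b'BM', '.bmp'),                 # BMP
    (b'GIF87a', '.gif'),             # GIF87a
    (b'GIF89a', '.gif'),             # GIF89a
    (b'MM', '.tiff'),                # TIFF, big-endian
    (b'II', '.tiff'),                # TIFF, little-endian
]


def sniff_image_ext(data: bytes) -> str:
    """Guess a file extension from the leading bytes of an image stream."""
    for signature, ext in _IMAGE_SIGNATURES:
        if data.startswith(signature):
            return ext
    return '.unk'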

def _save_j2k(image: LTImage, path: str) -> str:
    """Save a JPEG 2000 codestream, re-encoded as PNG via Pillow."""
    try:
        from PIL import Image
    except ImportError:
        raise ImportError(PIL_ERROR_MESSAGE)
    path = path + ".png"
    data = image.stream.get_data()
    assert data is not None
    byte_stream = BytesIO(data)
    roiImg = Image.open(byte_stream)
    roiImg.save(path)
    return path


def _save_jpeg(image: LTImage, path: str) -> str:
    """Save a JPEG encoded image."""
    # Pull the raw image bytes out of the stream
    raw_data = image.stream.get_rawdata()
    assert raw_data is not None
    path = path + ".jpg"
    if os.path.exists(path):
        return path
    with open(path, "wb") as fp:
        # CMYK images must be inverted and converted to RGB before saving,
        # since JPEG files as commonly consumed do not carry CMYK data.
        if LITERAL_DEVICE_CMYK in image.colorspace:
            try:
                from PIL import Image, ImageChops  # type: ignore[import]
            except ImportError:
                raise ImportError(PIL_ERROR_MESSAGE)
            # Load raw_data into memory so PIL can open it
            ifp = BytesIO(raw_data)
            i = Image.open(ifp)
            # Invert the colors, then convert to RGB
            i = ImageChops.invert(i)
            i = i.convert("RGB")
            # Write the processed image as JPEG into the open file object
            i.save(fp, "JPEG")
        else:
            # Not CMYK: the raw stream already is a valid JPEG file
            fp.write(raw_data)
    return path


def _save_jpeg2000(image: LTImage, path: str) -> str:
    """Save a JPEG 2000 encoded image."""
    raw_data = image.stream.get_rawdata()
    assert raw_data is not None
    path = path + ".png"
    if os.path.exists(path):
        return path
    try:
        from PIL import Image  # type: ignore[import]
    except ImportError:
        raise ImportError(PIL_ERROR_MESSAGE)
    # Writing the raw codestream directly produces a file most image viewers
    # cannot open; decoding with PIL and re-saving through OpenCV yields a
    # file that opens everywhere.
    try:
        ifp = BytesIO(raw_data)
        i = Image.open(ifp)
        # PIL uses RGB channel order while OpenCV expects BGR,
        # so convert before handing the array to cv2.imwrite.
        opencv_image = cv2.cvtColor(np.array(i), cv2.COLOR_RGB2BGR)
        cv2.imwrite(path, opencv_image)
    except ValueError as e:
        print(f'Error processing image: {e}')
    except OSError as e:
        print(f'Error processing image: {e}')
    return path


def _save_bmp(image: LTImage, width: int, height: int, bytes_per_line: int, bits: int, path: str) -> str:
    """Save a BMP encoded image (raw stream bytes; no BMP header is added)."""
    data = image.stream.get_data()
    path = path + ".bmp"
    with open(path, "wb") as fp:
        fp.write(data)
    return path


def main_parse(pdf_path: str, title_path: str, image_dir: str) -> None:
    """Parse a PDF: collect candidate titles into a JSON file, export images.

    Title records hold the per-page title index, the page number, the text
    box bounding box and the text itself. Images found inside LTFigure
    containers are written to image_dir.
    """
    texts = []
    images = []
    # extract_pages yields one layout object per page
    for page_number, page_layout in enumerate(extract_pages(pdf_path)):
        title_index = 0
        image_index = 0
        # Walk every element in the page layout
        for element in page_layout:
            if isinstance(element, LTLine):
                pass  # plain lines carry nothing we need
            elif isinstance(element, LTRect):
                pass  # rectangles carry nothing we need
            # A single-line horizontal text box may be a heading: record it
            # if is_title() matches or the line is unusually tall.
            elif isinstance(element, LTTextBoxHorizontal) and len(element._objs) == 1:
                text = element.get_text().strip()
                # Assumption: headings are one line set in a larger font
                if text and (is_title(text) or element.height > 15):
                    texts.append({'index': title_index, 'page_number': page_number, 'bbox': element.bbox, 'text': text})
                    title_index += 1
            # Figures may wrap embedded images: export each one found
            elif isinstance(element, LTFigure):
                for e_obj in element._objs:
                    if isinstance(e_obj, LTImage):
                        image_file = os.path.join(image_dir, f'image_page_{page_number}_{image_index}')
                        image_file = export_image(e_obj, image_file)
                        if image_file:
                            images.append(image_file)
                            pprint(f'Image saved: {image_file}')
                        image_index += 1
    # Persist the collected title records as JSON
    with open(title_path, 'w', encoding='utf-8') as fp:
        json.dump(texts, fp, indent=4, ensure_ascii=False)
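
# Illustrative helper (nothing in this module calls it): the fixed
# `element.height > 15` cutoff in main_parse assumes headings are set in a
# larger font than body text. One way to make that adaptive is to measure the
# dominant character size of a page and compare heading candidates against it.
def dominant_char_height(page_layout) -> float:
    """Return the most common character height on a page (the body-text size)."""
    from collections import Counter
    heights = Counter()
    for element in page_layout:
        if isinstance(element, LTTextBoxHorizontal):
            for line in element:          # text lines inside the box
                for ch in line:           # characters inside the line
                    if isinstance(ch, LTChar):
                        heights[round(ch.size, 1)] += 1
    return heights.most_common(1)[0][0] if heights else 0.0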

def table_parse(pdf_path: str, title_path: str, start_title: str = '六、已标价工程量清单', end_title: str = '七、施工组织设计', table_path: str = 'table.json', start_page_number: Optional[int] = None, end_page_number: Optional[int] = None) -> list:
    """Parse the tables of a PDF into structured JSON.

    @pdf_path: path of the PDF file to parse
    @title_path: path of the JSON file holding title records (from main_parse)
    @start_title / @end_title: titles delimiting the page range to parse
    @table_path: path of the JSON file the parsed tables are written to
    @start_page_number / @end_page_number: explicit page range; when omitted,
        the pages are looked up in title_path via start_title and end_title
    """
    # Holds the parsed tables
    tables = []

    # Resolve the page range from the title records when not given explicitly
    if (start_page_number is None) or (end_page_number is None):
        df = pd.read_json(title_path)
        # Use the last page on which each delimiting title occurs
        start_page_number = df[df['text'] == start_title].page_number.max()
        end_page_number = df[df['text'] == end_title].page_number.max()
        if pd.isna(start_page_number) or pd.isna(end_page_number):
            raise ValueError(f'Could not locate "{start_title}" / "{end_title}" in {title_path}')

    def concat_table(tables, table):
        """Add a table to the results: either start a new table, or append the
        rows to the previous one when the table continues across pages.

        Each entry records its page numbers, header length, column count,
        the rows themselves, and a confidence flag.
        """
        # Strip all whitespace from each cell of the first and last rows
        first = [''.join([i for i in cell.split() if i]) if cell else cell for cell in table[0]]
        tail = [''.join([i for i in cell.split() if i]) if cell else cell for cell in table[-1]]
        if len(table) > 1:
            second = [''.join([i for i in cell.split() if i]) if cell else cell for cell in table[1]]
        # Decide how to add the table:
        # - the first row shares more than two cells with HEADERS: it is an
        #   independent header row, so start a new table (confidence 1)
        # - no tables collected yet: start a new table
        # - same column count as the previous table, and the previous table
        #   ends on the previous page: a continuation, append the rows
        # - otherwise: start a new table with confidence 0
        if len(HEADERS & set(first)) > 2:
            tables.append({"page_numbers": [i], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 1})
        elif len(tables) == 0:
            tables.append({"page_numbers": [i], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 1})
        elif ((i-1) in tables[-1]['page_numbers']) and (len(first) == tables[-1]['col_len']):
            tables[-1]['page_numbers'].append(i)
            tables[-1]['table'].extend(table)
        else:
            tables.append({"page_numbers": [i], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 0})
        return tables

    # Walk the selected page range and collect every table found
    with pdfplumber.open(pdf_path) as pdf:
        for i in range(start_page_number, end_page_number):
            for table in pdf.pages[i].extract_tables():
                tables = concat_table(tables, table)

    # Persist the parsed tables as JSON
    with open(table_path, 'w', encoding='utf-8') as fp:
        json.dump(tables, fp, indent=4, ensure_ascii=False)

    return tables


class PdfExtractAttr(object):
    def __init__(self, file_path: str):
        """PDF parser collecting text details, outlines and tables.

        @file_path: path of the PDF file to parse
        """
        super(PdfExtractAttr, self).__init__()
        self.file_path = file_path
        self.details = []
        self.tables = []

    def parse_outline(self, out_path: str):
        """Parse the PDF outline (bookmarks) into a JSON file.

        Results are cached: if out_path already exists it is loaded instead
        of re-parsing the document.
        """
        if os.path.exists(out_path):
            return load_json(out_path)
        results = []
        parser = None
        with open(self.file_path, "rb") as fp:
            try:
                # Parse the raw PDF stream into a document object
                parser = PDFParser(fp)
                document = PDFDocument(parser)
                # RefPageNumberResolver (from tools) maps outline targets to
                # the page numbers they point at
                ref_pagenum_resolver = RefPageNumberResolver(document)
                outlines = document.get_outlines()
                # Each outline entry carries its level, title, an optional
                # destination (dest), action (a) and structure element (se);
                # resolve the page number from whichever is present.
                for (level, title, dest, a, se) in outlines:
                    if dest:
                        page_num = ref_pagenum_resolver.resolve(dest)
                    elif a:
                        page_num = ref_pagenum_resolver.resolve(a)
                    elif se:
                        page_num = ref_pagenum_resolver.resolve(se)
                    else:
                        page_num = None
                    results.append({'level': level, 'title': title, 'page_number': page_num})
            # The document has no outline at all
            except PDFNoOutlines:
                print("No outlines found.")
            # The document is corrupted or not a PDF
            except PDFSyntaxError:
                print("Corrupted PDF or non-PDF file.")
            finally:
                if parser is not None:
                    parser.close()
        # Persist and show the outline records
        with open(out_path, 'w', encoding='utf-8') as op:
            json.dump(results, op, indent=4, ensure_ascii=False)
        print(results)
        return results
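
    # Sketch (not used elsewhere in this module): turning parse_outline()
    # results into a page range for a pair of section titles, e.g. to feed a
    # table_parse-style page window. Assumes titles match outline entries
    # exactly; on duplicate titles the last occurrence wins.
    @staticmethod
    def section_page_range(outlines: list, start_title: str, end_title: str):
        """Return (start_page, end_page) for two outline titles, else None."""
        pages = {o['title']: o['page_number'] for o in outlines
                 if o.get('page_number') is not None}
        if start_title in pages and end_title in pages:
            return pages[start_title], pages[end_title]
        return None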
    def parse_text(self, out_path):
        """Parse the text boxes of every page, recording position, alignment
        and line count for each LTTextBoxHorizontal element.

        Results are cached: if out_path already exists it is loaded instead
        of re-parsing the document.
        """
        if os.path.exists(out_path):
            self.details = load_json(out_path)
        else:
            # Walk the layout of every page
            for page_number, page_layout in enumerate(extract_pages(self.file_path)):
                for element in page_layout:
                    if isinstance(element, LTTextBoxHorizontal):
                        # Distances from the page edges, and the box width
                        left = element.x0
                        right = (page_layout.width - element.x1)
                        top = (page_layout.height - element.y1)
                        bottom = element.y0
                        width = element.width
                        # Classify the alignment from the edge distances
                        if (left > right) and (abs(left - right) > 100):
                            alignment = 'right'
                        elif (left > 100) and (abs(left - right) < 50) and ((abs(left - right) / width) < 0.5):
                            alignment = 'center'
                        else:
                            alignment = 'left'
                        # Record the parsed element
                        self.details.append({
                            'page_number': page_number,
                            'index': element.index,
                            'x0': element.bbox[0],
                            'y0': element.bbox[1],
                            'x1': element.bbox[2],
                            'y1': element.bbox[3],
                            'alignment': alignment,
                            'lines': len(element._objs),
                            'text': element.get_text().strip(),
                            'is_table_name': element.get_text().strip().endswith('表')
                        })
            with open(out_path, 'w', encoding='utf-8') as fp:
                json.dump(self.details, fp, indent=4, ensure_ascii=False)
        # Keep a DataFrame view for the table-title lookups in parse_table
        self.detail_df = pd.DataFrame(self.details)
        return self.details
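
    # Pure-function restatement of the alignment rule in parse_text, kept as
    # a staticmethod so it can be unit-tested in isolation. The thresholds
    # (100pt, 50pt, ratio 0.5) mirror the heuristics above; they are tuning
    # assumptions, not values from any specification.
    @staticmethod
    def classify_alignment(left: float, right: float, width: float) -> str:
        if (left > right) and (abs(left - right) > 100):
            return 'right'
        if (left > 100) and (abs(left - right) < 50) and ((abs(left - right) / width) < 0.5):
            return 'center'
        return 'left'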
    def concat_table(self, table: list, page_number: int, table_name: str = None, new: bool = False) -> None:
        """Add a table to the results: either start a new table, or append the
        rows to the previous one when the table continues across pages.

        @table: the extracted rows
        @page_number: page the rows came from
        @table_name: optional title for the table
        @new: force the rows to start a new table
        """
        # Strip all whitespace from each cell of the first row
        first = [''.join([i for i in cell.split() if i]) if cell else cell for cell in table[0]]
        # Caller marked this as a new table: add it directly
        if new:
            self.tables.append({"page_numbers": [page_number], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 1, "table_name": table_name if table_name else ""})
            return
        tail = [''.join([i for i in cell.split() if i]) if cell else cell for cell in table[-1]]
        if len(table) > 1:
            second = [''.join([i for i in cell.split() if i]) if cell else cell for cell in table[1]]
        else:
            second = None
        if len(HEADERS & set(first)) > 2:
            # Many header cells in the first row: an independent header, start a new table
            self.tables.append({"page_numbers": [page_number], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 1, "table_name": table_name if table_name else ""})
        elif second and (len(HEADERS & set(second)) > 2):
            # Header cells in the second row: the first row is likely the table name
            if not table_name:
                first = [i for i in first if i]
                if len(first) == 1:
                    table_name = "".join(first)
            self.tables.append({"page_numbers": [page_number], "title_len": len(second), "col_len": len(table[-1]), "table": table[1:], "confidence": 1, "table_name": table_name if table_name else ""})
        # Guard: when the opening page holds several tables, self.tables can
        # still be empty here and self.tables[-1] below would raise
        # "list index out of range" - treat the rows as the opening table.
        elif len(self.tables) == 0:
            self.tables.append({"page_numbers": [page_number], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 1, "table_name": table_name if table_name else ""})
        elif ((page_number-1) in self.tables[-1]['page_numbers']) and (len(first) == self.tables[-1]['col_len']):
            # Same column count and the previous table ends on the previous
            # page: a continuation, so merge the rows
            self.tables[-1]['page_numbers'].append(page_number)
            self.tables[-1]['table'].extend(table)
        else:
            self.tables.append({"page_numbers": [page_number], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 0, "table_name": table_name if table_name else ""})

    def parse_table(self, out_path: str):
        """Parse tables page by page.

        Note: relies on self.detail_df (built by parse_text) to look up
        centered text boxes ending in "表" as table titles.
        """
        with pdfplumber.open(self.file_path) as pdf:
            for page_number, page_layout in enumerate(pdf.pages):
                # Look for tables on this page
                tables = page_layout.find_tables()
                # Exactly one table on this page: merge-check it
                if len(tables) == 1:
                    table = tables[0]
                    # Bounding box of the table
                    x0, y0, x1, y1 = table.bbox
                    # Look for a matching table title among the text details
                    try:
                        table_title_df = self.detail_df.query(f'''page_number == {page_number} and is_table_name == True and alignment == "center"''')
                    except Exception:
                        continue
                    if table_title_df.empty:
                        # No title found: merge the rows on their own
                        print(f'processing page_number: {page_number}')
                        self.concat_table(table.extract(), page_number=page_number)
                    else:
                        # Title found: pass it along with the rows
                        table_title_name = table_title_df.iloc[0]['text']
                        print(f'processing page_number with table_name: {table_title_name}')
                        self.concat_table(table.extract(), page_number=page_number, table_name=table_title_name)
                # Several tables on this page: merge-check the first one; the
                # later tables cannot be continuations, so mark them as new.
                # TODO: table_name matching for multi-table pages is not implemented yet.
                elif len(tables) > 1:
                    print(f'current page {page_number} has multiple tables')
                    first_table = tables[0]
                    self.concat_table(first_table.extract(), page_number=page_number)
                    for table_index in range(1, len(tables)):
                        self.concat_table(tables[table_index].extract(), page_number=page_number, new=True)

        with open(out_path, 'w', encoding='utf-8') as fp:
            json.dump(self.tables, fp, indent=4, ensure_ascii=False)

        return self.tables


if __name__ == '__main__':
    pdf_path = './南方电网数字研究院有限公司.pdf'
    title_path = './南方电网数字研究院有限公司.json'
    image_dir = './test_images'

    os.makedirs(image_dir, exist_ok=True)

    main_parse(pdf_path=pdf_path, title_path=title_path, image_dir=image_dir)

    # Example of the class-based pipeline (output paths are illustrative):
    # agent = PdfExtractAttr(file_path=pdf_path)
    # agent.parse_outline(out_path='./outlines.json')
    # agent.parse_text(out_path='./details.json')
    # agent.parse_table(out_path='./tables.json')
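
# Follow-on sketch (assumptions: './tables.json' was produced by
# PdfExtractAttr.parse_table above, and the first row of each entry's
# "table" is its header row). Loading one parsed table into pandas:
#
#   entries = load_json('./tables.json')
#   header, *rows = entries[0]['table']
#   df = pd.DataFrame(rows, columns=header)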