# -*- coding: utf-8 -*-
# @Author: privacy
# @Date: 2024-06-11 13:43:14
# @Last Modified by: privacy
# @Last Modified time: 2024-06-11 14:10:56
# --- Legacy approach (commented out): extract page images with PyPDF2 + Pillow ---
# import os
# from PIL import Image
# from PyPDF2 import PdfReader
#
# # Read the PDF file
# with open(pdf_path, 'rb') as file:
#     reader = PdfReader(file)
#     num_pages = len(reader.pages)
#     # Walk through every page of the PDF
#     for page_num in range(num_pages):
#         page = reader.pages[page_num]
#         # Extract the images embedded in the page
#         if '/XObject' in page['/Resources']:
#             xobjects = page['/Resources']['/XObject'].get_object()
#             for obj in xobjects:
#                 if xobjects[obj]['/Subtype'] == '/Image':
#                     size = (xobjects[obj]['/Width'], xobjects[obj]['/Height'])
#                     data = xobjects[obj].get_data()
#                     if xobjects[obj]['/ColorSpace'] == '/DeviceRGB':
#                         mode = "RGB"
#                     else:
#                         mode = "P"
#                     img = Image.frombytes(mode, size, data)
#                     img_path = os.path.join(output_dir, f'image_{page_num}_{obj}.png')
#                     img.save(img_path)
#                     print(f'Image saved: {img_path}')
#######################################################################
# --- Legacy approach (commented out): dump XObject images with PyMuPDF (fitz) ---
# NOTE: _getXrefLength()/_getXrefString()/writePNG() are old PyMuPDF APIs; newer
# releases expose xref_length()/xref_object()/Pixmap.save() instead.
# import os
# import re
# import fitz
#
# def pdf2pic(path, pic_path):
#     checkXO = r"/Type(?= */XObject)"
#     checkIM = r"/Subtype(?= */Image)"
#     pdf = fitz.open(path)
#     lenXREF = pdf._getXrefLength()
#     imgcount = 0
#     for i in range(1, lenXREF):
#         text = pdf._getXrefString(i)
#         isXObject = re.search(checkXO, text)
#         isImage = re.search(checkIM, text)
#         if not isXObject or not isImage:
#             continue
#         imgcount += 1
#         pix = fitz.Pixmap(pdf, i)
#         new_name = f"img_{imgcount}.png"
#         if pix.n < 5:
#             pix.writePNG(os.path.join(pic_path, new_name))
#         else:
#             pix0 = fitz.Pixmap(fitz.csRGB, pix)
#             pix0.writePNG(os.path.join(pic_path, new_name))
#             pix0 = None
#         pix = None
#
# if __name__ == '__main__':
#     pdf2pic(pdf_path, image_dir)
#######################################################################
import os
import re
import json
from io import BytesIO
from pprint import pprint

import numpy as np
import cv2
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTRect, LTTextBoxHorizontal, LTLine, LTFigure, LTCurve, LTImage, LTChar
from pdfminer.pdfcolor import LITERAL_DEVICE_CMYK
from pdfminer.pdfcolor import LITERAL_DEVICE_GRAY
from pdfminer.pdfcolor import LITERAL_DEVICE_RGB
from pdfminer.pdftypes import (
    LITERALS_DCT_DECODE,
    LITERALS_JBIG2_DECODE,
    LITERALS_JPX_DECODE,
    LITERALS_FLATE_DECODE,
)
import pandas as pd
import pdfplumber
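
# _save_jpeg() and _save_jpeg2000() below raise ImportError(PIL_ERROR_MESSAGE) when
# Pillow is missing, but this script never defined that constant (pdfminer.six keeps a
# similar constant in its image module). Defining it locally is the safest assumption;
# the exact wording here is ours.
PIL_ERROR_MESSAGE = (
    "Could not import Pillow. Pillow is required to export this image; "
    "install it with `pip install Pillow`."
)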
def is_title(line: str) -> bool:
    """Heuristically decide whether a text line looks like a section heading."""
    line = line.strip()
    # Numbered headings: (一)/(一), "1."/"1x."/"2x.", "第x章/节/条", or a Chinese
    # numeral followed by 、/要/是.
    if re.findall(r'^[(\(][一二三四五六七八九十]+[\))]|^\d\.|^1\d\.|^2\d\.|^[第][一二三四五六七八九十\d]+[章节条]|[一二三四五六七八九十]+[、要是]', line):
        return True
    # Fixed section names: appendix, references, attached tables.
    if re.findall(r'^附录|^参考文献|^附表', line):
        return True
    return False
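
# A few hypothetical inputs and the verdicts the patterns above give them:
#   is_title('(一)投标函')             -> True   (parenthesised Chinese numeral)
#   is_title('1.工程概况')              -> True   ("1." style numbering)
#   is_title('第三章 评标办法')          -> True   ("第x章/节/条" heading)
#   is_title('六、已标价工程量清单')      -> True   (Chinese numeral + 、, used as a table anchor below)
#   is_title('本工程位于城区主干道旁。')  -> False  (ordinary body text)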
def export_image(image: LTImage, path: str) -> str:
    """Save an LTImage to disk and return the file name actually written."""
    (width, height) = image.srcsize
    filters = image.stream.get_filters()
    if len(filters) == 1 and filters[0][0] in LITERALS_DCT_DECODE:
        name = _save_jpeg(image, path)
    elif len(filters) == 1 and filters[0][0] in LITERALS_JPX_DECODE:
        name = _save_jpeg2000(image, path)
    # The remaining branches delegate to fallback helpers defined after
    # _save_jpeg2000() below (the original script referenced them without
    # defining them).
    elif image.bits == 1:
        name = _save_bmp(image, path, width, height, (width + 7) // 8, image.bits)
    elif image.bits == 8 and LITERAL_DEVICE_RGB in image.colorspace:
        name = _save_bmp(image, path, width, height, width * 3, image.bits * 3)
    elif image.bits == 8 and LITERAL_DEVICE_GRAY in image.colorspace:
        name = _save_bmp(image, path, width, height, width, image.bits)
    elif len(filters) == 1 and filters[0][0] in LITERALS_FLATE_DECODE:
        name = _save_bytes(image, path)
    else:
        name = _save_raw(image, path)
    return name
def _save_jpeg(image: LTImage, path: str) -> str:
    """Save a JPEG encoded image"""
    raw_data = image.stream.get_rawdata()
    assert raw_data is not None
    path = path + ".jpg"
    with open(path, "wb") as fp:
        if LITERAL_DEVICE_CMYK in image.colorspace:
            try:
                from PIL import Image, ImageChops  # type: ignore[import]
            except ImportError:
                raise ImportError(PIL_ERROR_MESSAGE)
            ifp = BytesIO(raw_data)
            i = Image.open(ifp)
            i = ImageChops.invert(i)
            i = i.convert("RGB")
            i.save(fp, "JPEG")
        else:
            fp.write(raw_data)
    return path
def _save_jpeg2000(image: LTImage, path: str) -> str:
    """Save a JPEG 2000 encoded image"""
    raw_data = image.stream.get_rawdata()
    assert raw_data is not None
    path = path + ".png"
    try:
        from PIL import Image  # type: ignore[import]
    except ImportError:
        raise ImportError(PIL_ERROR_MESSAGE)
    # Writing the raw JPEG 2000 data directly produces a file most image viewers
    # cannot open; decoding it with PIL and re-saving (here as PNG via OpenCV)
    # yields a file other programs handle without trouble.
    ifp = BytesIO(raw_data)
    i = Image.open(ifp)
    opencv_image = cv2.cvtColor(np.array(i), cv2.COLOR_RGB2BGR)
    cv2.imwrite(path, opencv_image)
    return path
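
# export_image() also routes uncompressed 1-bit/8-bit images to _save_bmp(),
# Flate-encoded streams to _save_bytes(), and everything else to _save_raw(),
# but the original script never defined those helpers (pdfminer.six implements
# them as ImageWriter methods). The fallbacks below are a minimal sketch, not
# pdfminer API: they assume the decoded stream holds tightly packed samples and
# fall back to dumping bytes when Pillow cannot rebuild the bitmap.
def _save_bmp(image: LTImage, path: str, width: int, height: int, bytes_per_line: int, bits: int) -> str:
    """Fallback: rebuild an uncompressed bitmap with Pillow and save it as PNG."""
    from PIL import Image
    data = image.stream.get_data()
    # bytes_per_line is accepted to match the call sites in export_image();
    # Pillow infers the row stride from the mode and width.
    # Map the bit depths used in export_image() onto Pillow raw modes.
    if bits == 1:
        mode = "1"      # bit-packed, rows padded to whole bytes
    elif bits == 24:
        mode = "RGB"    # 8 bits per sample, three samples per pixel
    else:
        mode = "L"      # 8-bit grayscale
    try:
        img = Image.frombytes(mode, (width, height), data)
    except ValueError:
        # Unexpected buffer size (e.g. padded rows): keep the decoded bytes instead.
        return _save_bytes(image, path)
    path = path + ".png"
    img.save(path)
    return path

def _save_bytes(image: LTImage, path: str) -> str:
    """Fallback for Flate-decoded streams: write the decoded sample bytes as-is."""
    path = path + ".bin"
    with open(path, "wb") as fp:
        fp.write(image.stream.get_data())
    return path

def _save_raw(image: LTImage, path: str) -> str:
    """Last resort: dump the raw (still encoded) stream so nothing is lost."""
    path = path + ".raw"
    with open(path, "wb") as fp:
        fp.write(image.stream.get_rawdata())
    return path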
def main_parse(pdf_path: str, title_path: str, image_dir: str) -> None:
    texts = []
    images = []
    # Read the PDF and walk its pages layout by layout.
    for page_number, page_layout in enumerate(extract_pages(pdf_path)):
        title_index = 0
        image_index = 0
        for element in page_layout:
            if isinstance(element, LTLine):
                pass
            elif isinstance(element, LTRect):
                pass
            elif isinstance(element, LTTextBoxHorizontal) and len(element._objs) == 1:
                text = element.get_text().strip()
                # Assume a title is a single line that matches the heading patterns
                # or uses a noticeably larger font (text box taller than 15 pt).
                if text and (is_title(text) or element.height > 15):
                    texts.append({'index': title_index, 'pageno': page_number, 'bbox': element.bbox, 'text': text})
                    title_index += 1
            elif isinstance(element, LTFigure):
                for e_obj in element._objs:
                    if isinstance(e_obj, LTImage):
                        # Export the embedded image data.
                        image_file = os.path.join(image_dir, f'image_page_{page_number}_{image_index}')
                        image_file = export_image(e_obj, image_file)
                        images.append(image_file)
                        pprint(f'Image saved: {image_file}')
                        image_index += 1
    with open(title_path, 'w', encoding='utf-8') as fp:
        json.dump(texts, fp, indent=4, ensure_ascii=False)
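
# Shape of the title JSON written above (values are illustrative only): a flat list of
# heading records whose "text" and "pageno" fields are what table_parse() keys on.
# [
#     {
#         "index": 0,
#         "pageno": 41,
#         "bbox": [89.9, 702.3, 305.6, 723.1],
#         "text": "六、已标价工程量清单"
#     },
#     ...
# ]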
def table_parse(pdf_path: str, title_path: str, start_title: str = '六、已标价工程量清单', end_title: str = '七、施工组织设计', table_path: str = 'table.json') -> list:
    tables = []
    df = pd.read_json(title_path)
    # Locate the page range of the bill-of-quantities section via the extracted titles.
    start_page_number = df[df['text'] == start_title].pageno.max()
    end_page_number = df[df['text'] == end_title].pageno.max()
    pdf = pdfplumber.open(pdf_path)
    for i in range(start_page_number, end_page_number):
        table = pdf.pages[i].extract_table()
        if not table:
            continue
        # Normalise the first row: strip all whitespace inside each cell.
        first = [''.join(cell.split()) if cell else cell for cell in table[0]]
        # pprint(first)
        if len({'序号', '项目编码', '项目名称', '项目特征', '单位', '工程量', '全费用综合单价', '合价', '备注', '主材名称', '规格型号', '不低于下列同档次品牌', '投标选用品牌及规格型号', '名称', '事项', '数量', '含税单价(元)', '含税合价(元)'} & set(first)) > 2:
            # Many known header cells in the first row: treat it as a standalone
            # header and start a new table.
            tables.append({"pagenos": [i], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 1})
        elif tables and ((i - 1) in tables[-1]['pagenos']) and (len(first) == tables[-1]['col_len']):
            # No real header, but the previous page holds a table with the same
            # column count: treat this page as a continuation and merge it.
            tables[-1]['pagenos'].append(i)
            tables[-1]['table'].extend(table)
        else:
            tables.append({"pagenos": [i], "title_len": len(first), "col_len": len(table[-1]), "table": table, "confidence": 0})
    pdf.close()
    with open(table_path, 'w', encoding='utf-8') as fp:
        json.dump(tables, fp, indent=4, ensure_ascii=False)
    return tables
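
# Each record appended above describes one extracted table:
#   pagenos     - zero-based pdfplumber page indexes the table spans
#   title_len   - number of cells in the first extracted row
#   col_len     - number of cells in the last extracted row
#   table       - the raw list-of-rows returned by pdfplumber
#   confidence  - 1 when a known header row was recognised, 0 otherwise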
if __name__ == '__main__':
    pdf_path = './投标文件-修改版9-5-1-1.pdf'
    title_path = './投标文件-修改版9-5-1-1.json'
    image_dir = './extracted_images'
    os.makedirs(image_dir, exist_ok=True)
    main_parse(pdf_path=pdf_path, title_path=title_path, image_dir=image_dir)
    tables = table_parse(pdf_path=pdf_path, title_path=title_path, start_title='六、已标价工程量清单', end_title='七、施工组织设计')