### Framework for parsing all PDF files and extracting information for testing
### PdfExtractAttr serves as the base class for PDF information extraction
# subclasses implement the matching functionality on top of it
# standard library imports
import os
import re
import json
import glob
import time
import base64
import logging
import datetime
import shutil
import pdb
from io import BytesIO
from pprint import pprint
# third-party imports
import numpy as np
import pandas as pd
import cv2
import torch
import requests
from tqdm import tqdm
from PIL import Image
import cn_clip.clip as clip
from cn_clip.clip import load_from_name, available_models
from pdfminer.image import ImageWriter
# local imports
from tools import RefPageNumberResolver
from get_info import PdfExtractAttr
from get_info import is_title, export_image, _save_jpeg, _save_jpeg2000, _save_bmp, main_parse, table_parse, load_json
# global envs
device = "cuda" if torch.cuda.is_available() else "cpu"
clip_version = "ViT-B-16"
model, preprocess = load_from_name(clip_version)
model.eval()
log_path = "/home/stf/miner_pdf/info.log"
# logging
def create_logger(log_path):
    """
    Write log output to both a log file and the console.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s')
    # handler that writes to the log file
    file_handler = logging.FileHandler(
        filename=log_path, mode='w')
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    # handler that writes to the console
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    return logger


logger = create_logger(log_path=log_path)
# external OCR interface
class OcrAgent():
    def __init__(self, url):
        self.url = url
        self.datetime_re = r'\d{4}年\d{1,2}月\d{1,2}日至(?:\d{4}年\d{1,2}月\d{1,2}日|长期)'
        # regexes for the different license / qualification types
        self.re_dict = {
            "business_license": r'营业执照',
            "deposit": r'^(?:开户许可证|[\u4e00-\u9fff]+存款账户[\u4e00-\u9fff]+)$',
            "production_license": r'\b[\u4e00-\u9fff]*许可证\b',
            "qualtifications": r'\b[\u4e00-\u9fff]*证书',
            "proof": r'\b[\u4e00-\u9fff]*证明',
        }
        # ink (handwriting) ratio threshold for signature detection
        self.sign_threshold = 0.05
    def get_content(self, image_path):
        try:
            with open(image_path, 'rb') as image_file:
                files = {"file": ("image.jpg", image_file, "image/jpeg")}
                # files = {"file": ("image.png", image_file, "image/png")}
                response = requests.post(self.url, files=files)
                return response.json()
        except Exception:
            raise ValueError(f"传入图像{image_path}已损坏")

    def remove_red_seal(self, image_path):
        # read the image
        input_img = cv2.imread(image_path)
        # split the image channels
        blue_c, green_c, red_c = cv2.split(input_img)
        # pick the threshold automatically with Otsu's method
        thresh, ret = cv2.threshold(red_c, 0, 255, cv2.THRESH_OTSU)
        # adjust the threshold
        filter_condition = int(thresh * 1.0)
        # remove the red seal
        _, red_thresh = cv2.threshold(red_c, filter_condition, 255, cv2.THRESH_BINARY)
        # convert the image back to 3 channels
        result_img = np.expand_dims(red_thresh, axis=2)
        result_img = np.concatenate((result_img, result_img, result_img), axis=-1)
        return result_img
    def judge(self, image_path: str, firm_name: str):
        '''Use regexes to decide whether the image is a business license or a qualification certificate.'''
        # image_prefix = image_path.split('/')[-1][:-4]
        image_prefix = image_path.split('/')[-1]
        logger.info(f'processing img: {image_prefix}')
        # page_number = image_prefix.split('_')[-2]
        response_item = {
            "qualtified": None,      # whether this is a license / certificate
            "matched": None,         # whether the firm name appears on the image
            "license_name": None,    # license / certificate name
            # "page_number": page_number,  # page the certificate is on
            "start_datetime": None,  # start of the validity period
            "end_datetime": None     # end of the validity period
        }
        content = self.get_content(image_path=image_path)
        image_info = content["rawjson"]["ret"]
        # the firm name must be present on the image
        if not self.search(image_info=image_info, key=firm_name):
            return None
        else:
            response_item['matched'] = True
        # does the image match a business license or a qualification certificate?
        for key, format in self.re_dict.items():
            if key == 'business_license':
                match_name = self.re_match(image_info=image_info, format=format)
            else:
                match_name = self.re_search(image_info=image_info, format=format)
            if match_name and key == 'business_license':
                response_item["qualtified"] = True
                response_item["license_name"] = match_name
                response_item = self.find_license_datetime(image_info=image_info, response_item=response_item)
                return response_item
            elif match_name:
                response_item["qualtified"] = True
                response_item["license_name"] = match_name
                response_item = self.find_certificate_datetime(image_info=image_info, response_item=response_item)
                return response_item
        return response_item
    # TODO: locate the validity period of qualification certificates
    def find_certificate_datetime(self, image_info, response_item):
        # keywords
        start_keywords = ['颁发日期', '发证日期', '生效日期']
        end_keywords = ['终止日期']
        priority_keywords = ['有效期', '使用期限', '有效日期']
        keywords_list = ['有效期', '使用期限', '有效日期', '终止日期', '颁发日期', '发证日期', '生效日期']
        # regex formats
        format = r'(?:[自至])?\d{4}年\d{1,2}月\d{1,2}日(?:至)?(?:\d{4}年\d{1,2}月\d{1,2}日)?'
        special_format = r'\d{4}-\d{1,2}-\d{1,2}'
        # check whether any date keyword is present
        flag = False
        keyword_dict = {}
        for info in image_info:
            word = info['word']
            left = info['rect']['left']
            top = info['rect']['top']
            width = info['rect']['width']
            height = info['rect']['height']
            for keyword in keywords_list:
                # this certificate contains a date keyword
                if keyword in word:
                    flag = True
                    charset_list = info['charset']
                    for char_dc in charset_list:
                        if char_dc['word'] == keyword[-1]:
                            right = char_dc['rect']['left'] + char_dc['rect']['width']
                            keyword_dict[keyword] = {
                                "left": left,
                                "top": top,
                                "right": right
                            }
        if flag:
            for info in image_info:
                word = info['word']
                if '年' in word or re.search(r'\d', word):
                    left = info['rect']['left']
                    top = info['rect']['top']
                    width = info['rect']['width']
                    if '年' in word:
                        find_list = re.findall(pattern=format, string=word)
                    else:
                        find_list = re.findall(pattern=special_format, string=word)
                    logger.info(f'word {word} has find_list {find_list}')
                    # if self.check:
                    #     pdb.set_trace()
                    if len(find_list) == 1:
                        find_string = find_list[0]
                        if '至' in find_string:
                            start_prefix = find_string.split('至')[0].replace('自', '')
                            end_prefix = find_string.split('至')[-1]
                            if '年' in start_prefix:
                                response_item['start_datetime'] = start_prefix
                            if end_prefix != '':
                                response_item['end_datetime'] = end_prefix
                            return response_item
                        # without '至', assign the date via its position and the keyword boxes found above
                        else:
                            for k, k_info in keyword_dict.items():
                                k_left = k_info['left']
                                k_right = k_info['right']
                                k_top = k_info['top']
                                # keyword captured
                                if left == k_left:
                                    if ((k in priority_keywords) or (k in end_keywords)) and response_item['end_datetime'] is None:
                                        response_item['end_datetime'] = find_string
                                    elif k in start_keywords and response_item['start_datetime'] is None:
                                        response_item['start_datetime'] = find_string
                                    break
                                elif left >= k_right and top >= k_top:
                                    if ((k in priority_keywords) or (k in end_keywords)) and response_item['end_datetime'] is None:
                                        response_item['end_datetime'] = find_string
                                    elif k in start_keywords and response_item['start_datetime'] is None:
                                        response_item['start_datetime'] = find_string
                    elif len(find_list) == 2:
                        start_prefix = find_list[0].replace('自', '')
                        end_prefix = find_list[-1].replace('至', '')
                        if response_item['start_datetime'] is None:
                            response_item['start_datetime'] = start_prefix
                        if response_item['end_datetime'] is None:
                            response_item['end_datetime'] = end_prefix
                    else:
                        logger.info(f'wrong word: {word} ...')
                else:
                    continue
        return response_item
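
    # Example of the date regexes above (hedged, illustrative strings): on the word
    # '有效期:2020年1月1日至2025年12月31日', re.findall with `format` should return
    # ['2020年1月1日至2025年12月31日'], which is then split on '至' into start / end dates; a word
    # such as '2020-01-01' contains no '年', so `special_format` matches the hyphenated form instead.
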
    # find the id and date information on a business license
    def find_license_datetime(self, image_info, response_item):
        for info in image_info:
            word = info['word']
            # id
            if (word.startswith('证照编号:') and len(word) == 25) or (word.isdigit() and len(word) == 20):
                response_item['id'] = word if word.isdigit() else word[5:]
            elif bool(re.match(self.datetime_re, word)):
                split = word.split('至')
                start_datetime = split[0]
                end_datetime = split[-1]
                response_item['start_datetime'] = start_datetime
                response_item['end_datetime'] = end_datetime
            elif word == '长期':
                response_item['start_datetime'] = response_item['end_datetime'] = '长期'
        return response_item

    # search image_info for a word that contains key
    def search(self, image_info, key):
        for info in image_info:
            word = info['word']
            if key in word:
                return True
        return False

    # search image_info with re.search for a word matching the {format} regex
    def re_search(self, image_info, format):
        for info in image_info:
            word = info['word']
            match = re.search(format, word)
            if match:
                return match.group(0)
        return False

    # search image_info with re.match for a word matching the {format} regex
    def re_match(self, image_info, format):
        for info in image_info:
            word = info['word']
            match = re.match(format, word)
            if match:
                return word
        return False
    # check whether the legal representative's signature is present at the expected position
    def signature_recognition(self, image_path: str):
        keywords = ['投标函', '(法定代表人CA电子印章)', '(法定代表人CA电子印章或签字)', '(签字)', '法定代表人或其委托代理人:', '法定代表人:']
        key_pos = {}
        image_prefix = os.path.dirname(image_path)  # directory containing the image
        image_name = image_path.split('/')[-1][:-4]
        removed_image_name = image_name + '_roi' + image_path.split('/')[-1][-4:]
        ink_image_name = image_name + '_ink' + image_path.split('/')[-1][-4:]
        removed_image_path = os.path.join(image_prefix, removed_image_name)
        ink_image_path = os.path.join(image_prefix, ink_image_name)
        if not os.path.exists(removed_image_path):
            removed_seal_img = self.remove_red_seal(image_path=image_path)
            cv2.imwrite(removed_image_path, removed_seal_img)
        else:
            removed_seal_img = cv2.imread(removed_image_path)
        content = self.get_content(image_path=removed_image_path)
        image_info = content["rawjson"]["ret"]
        for info in image_info:
            word = info['word']
            left = info['rect']['left']
            top = info['rect']['top']
            width = info['rect']['width']
            height = info['rect']['height']
            right = left + width
            bottom = top + height
            for keyword in keywords:
                if keyword in word:
                    key_pos[keyword] = {
                        "word": word,
                        "left": left,
                        "right": right,
                        "top": top,
                        "bottom": bottom
                    }
                    break
        # if none of the keywords ("投标函", "法定代表人", ...) is present, return False
        if len(key_pos) == 0:
            return False
        # locate the legal-representative line
        if ((key_pos.get('法定代表人:') is not None) or (key_pos.get('法定代表人或其委托代理人:') is not None)) and \
                ((key_pos.get('(法定代表人CA电子印章)') is not None) or (key_pos.get('(法定代表人CA电子印章或签字)') is not None) or (key_pos.get('(签字)') is not None)):
            if key_pos.get('法定代表人或其委托代理人:') is not None:
                l_info = key_pos['法定代表人或其委托代理人:']
                l_cnt = 13
                l_string = '法定代表人或其委托代理人:'
            else:
                l_info = key_pos['法定代表人:']
                l_cnt = 6
                l_string = '法定代表人:'
            if key_pos.get('(法定代表人CA电子印章)') is not None:
                r_info = key_pos['(法定代表人CA电子印章)']
                r_string = '(法定代表人CA电子印章)'
            elif key_pos.get('(法定代表人CA电子印章或签字)') is not None:
                r_info = key_pos['(法定代表人CA电子印章或签字)']
                r_string = '(法定代表人CA电子印章或签字)'
            else:
                r_info = key_pos['(签字)']
                r_string = '(签字)'
            # the signature, if any, should sit between the two anchors
            l = l_info['right']
            l_word = l_info['word']
            r = r_info['left']
            r_word = r_info['word']
            t = max(l_info['top'], r_info['top'])
            b = min(l_info['bottom'], r_info['bottom']) - 5
            if l_word[-l_cnt:] != l_string or r_word != r_string:
                return True
            else:
                black_ratio = self.ink_recognition(
                    input_img=removed_seal_img,
                    out_path=ink_image_path,
                    meta={
                        "left": l,
                        "right": r,
                        "top": t,
                        "bottom": b
                    }
                )
                if black_ratio >= self.sign_threshold:
                    return True
                return False
        elif (key_pos.get('(法定代表人CA电子印章)') is not None) or (key_pos.get('(法定代表人CA电子印章或签字)') is not None) or (key_pos.get('(签字)') is not None):
            # here the signature, if any, is already contained in the recognized word
            if key_pos.get('(法定代表人CA电子印章)') is not None:
                key = key_pos['(法定代表人CA电子印章)']
            elif key_pos.get('(法定代表人CA电子印章或签字)') is not None:
                key = key_pos['(法定代表人CA电子印章或签字)']
            elif key_pos.get('(签字)') is not None:
                key = key_pos['(签字)']
            key_word = key['word']
            key_word = key_word.replace('(法定代表人CA电子印章)', '').replace('(法定代表人CA电子印章或签字)', '').replace('(签字)', '').replace('法定代表人或其委托代理人:', '').replace('法定代表人:', '')
            if key_word != '':
                return True
            return False
        elif key_pos.get('法定代表人:') is not None:
            # here the signature is either to the right of the keyword or already part of the word
            word = key_pos['法定代表人:']['word']
            l = key_pos['法定代表人:']['left']
            r = l + 100
            t = key_pos['法定代表人:']['top']
            b = key_pos['法定代表人:']['bottom'] - 5
            if word[-6:] != '法定代表人:':
                return True
            else:
                black_ratio = self.ink_recognition(
                    input_img=removed_seal_img,
                    out_path=ink_image_path,
                    meta={
                        "left": l,
                        "right": r,
                        "top": t,
                        "bottom": b
                    }
                )
                if black_ratio >= self.sign_threshold:
                    return True
                return False
        elif key_pos.get('法定代表人或其委托代理人:') is not None:
            # here the signature is either to the right of the keyword or already part of the word
            word = key_pos['法定代表人或其委托代理人:']['word']
            l = key_pos['法定代表人或其委托代理人:']['left']
            r = l + 100
            t = key_pos['法定代表人或其委托代理人:']['top']
            b = key_pos['法定代表人或其委托代理人:']['bottom'] - 5
            if word[-13:] != '法定代表人或其委托代理人:':
                return True
            else:
                black_ratio = self.ink_recognition(
                    input_img=removed_seal_img,
                    out_path=ink_image_path,
                    meta={
                        "left": l,
                        "right": r,
                        "top": t,
                        "bottom": b
                    }
                )
                if black_ratio >= self.sign_threshold:
                    return True
                return False
        else:
            return False
    # decide whether handwriting is present inside a fixed rectangular region
    def ink_recognition(self, input_img, out_path, meta: dict):
        left = meta["left"]
        right = meta["right"]
        top = meta["top"]
        bottom = meta["bottom"]
        crop_img = input_img[top:bottom, left:right, :]
        cv2.imwrite(out_path, crop_img)
        gray_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
        thresh, ret = cv2.threshold(gray_img, 0, 255, cv2.THRESH_OTSU)
        filter_condition = int(thresh * 0.90)
        _, black_thresh = cv2.threshold(gray_img, filter_condition, 255, cv2.THRESH_BINARY_INV)
        total_pixels = black_thresh.size
        black_pixels = np.count_nonzero(black_thresh)
        black_ratio = black_pixels / total_pixels
        return black_ratio
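

# Minimal usage sketch for OcrAgent (hedged: the endpoint URL, image path and firm name below are
# illustrative placeholders, not values taken from this file; the real endpoint is configured in
# __main__).
def _demo_ocr_agent(image_path: str = "example_page.jpg") -> None:
    """Run the license/certificate check and the signature check on one image."""
    agent = OcrAgent(url="http://localhost:18000/ctr_ocr")  # hypothetical OCR endpoint
    item = agent.judge(image_path=image_path, firm_name="某公司")  # hypothetical firm name
    if item is not None and item.get("qualtified"):
        logger.info(f"license: {item['license_name']}, valid {item['start_datetime']} ~ {item['end_datetime']}")
    # signature_recognition returns True when handwriting or a CA stamp is detected next to the
    # legal-representative keywords
    logger.info(f"signature present: {agent.signature_recognition(image_path=image_path)}")
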
# external seal-OCR interface, used to extract seal (stamp) information from a page
# works alongside the signature-judgement functions above
class seal_agent():
    def __init__(self,
                 base_url: str,
                 access_token: str,
                 headers: dict,
                 ):
        self.base_url = base_url
        self.access_token = access_token
        self.headers = headers
        self.request_url = base_url + access_token

    def seal_recognition(self, img_path):
        with open(img_path, 'rb') as f:
            img = base64.b64encode(f.read())
        params = {"image": img}
        response = requests.post(self.request_url, data=params, headers=self.headers)
        if response:
            data = response.json()
        else:
            data = {}
        return data
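

# Minimal usage sketch for seal_agent (hedged: the access token is a placeholder; the real token
# and headers are configured in __main__).
def _demo_seal_agent(img_path: str = "example_page.jpg") -> dict:
    """Send one image to the seal-OCR endpoint and return its JSON response."""
    agent = seal_agent(
        base_url="https://aip.baidubce.com/rest/2.0/ocr/v1/seal?access_token=",
        access_token="<your-access-token>",  # placeholder
        headers={'content-type': 'application/x-www-form-urlencoded'},
    )
    return agent.seal_recognition(img_path)
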
class PdfMatcher(PdfExtractAttr):
    '''PDF matching.'''

    def __init__(self, file_path: str):
        super(PdfMatcher, self).__init__(
            file_path=file_path
        )
        # bid document name
        self.bid_name = file_path.split('/')[-1][:-4]
        # bid document data directory
        self.bid_dir = os.path.join(os.path.dirname(file_path), self.bid_name)
        # firm name
        self.firm_name = file_path.split('/')[-2]
        # title list
        title_path = os.path.join(self.bid_dir, "title.json")
        self.title = load_json(title_path)
        # outline list
        outline_path = os.path.join(self.bid_dir, "outlines.json")
        self.outline = self.parse_outline(out_path=outline_path)
        # text list
        text_path = os.path.join(self.bid_dir, "all_texts.json")
        self.details = self.parse_text(out_path=text_path)
        # table list
        table_path = os.path.join(self.bid_dir, "all_tables.json")
        if os.path.exists(table_path):
            self.tables = load_json(table_path)
        else:
            self.tables = self.parse_table(out_path=table_path)
        # image list
        self.image_dir = os.path.join(self.bid_dir, "extracted_images")
        # image filename format
        self.image_format = "image_page_{}*"
        # image filter thresholds
        self.start_threshold = 10
        self.distance_threshold = 6
        self.search_threshold = 20
        self.match_threshold = 44.0
        self.degrade_threshold = 42.0
        # NOTE: exact_match / search_qualtification_certificate below also use self.bl_tamplate,
        # self.qc_tamplate and self.qc_threshold, which are not defined in this file; they are
        # expected to be provided elsewhere (e.g. by PdfExtractAttr or by the caller).
    def search_interval(self):
        '''Locate the page ranges that may contain business licenses and qualification certificates.'''
        # fuzzy location via keywords
        keywords = ['资格审查资料', '资格审查材料', '其它材料', '其他材料', '其他资料', '附件', '影印件']
        search_interval = []
        # locate in title.json
        left_pos = -1   # left pointer
        right_pos = -1  # right pointer
        for title_block in self.title:
            block_text = title_block['text'].replace(' ', '').strip()
            # close the currently open interval first
            if left_pos != -1 and '证书' not in block_text:
                right_pos = title_block['page_number']
                search_interval.append((left_pos, right_pos))
                # reset
                left_pos = -1
            for keyword in keywords:
                if keyword in block_text:
                    # fuzzy outline-style location first
                    center_page = None
                    if '.' in block_text:
                        center_page = block_text.split('.')[-1]
                    if center_page is not None and center_page.isdigit():
                        left_pos = min(title_block['page_number'], int(center_page))
                    else:
                        left_pos = title_block['page_number']
        # final check
        if left_pos != -1:
            search_interval.append((left_pos, right_pos))
        # reset
        left_pos = -1
        right_pos = -1
        # locate in outlines.json
        if len(self.outline) > 0:
            for outline_block in self.outline:
                if left_pos != -1:
                    right_pos = outline_block["page_number"]
                    right_pos = right_pos if right_pos is not None else -1
                    search_interval.append((left_pos, right_pos))
                    left_pos = -1
                outline_text = outline_block['title'].strip()
                for keyword in keywords:
                    if keyword in outline_text:
                        if outline_block["page_number"] is not None:
                            left_pos = outline_block["page_number"]
            # final check
            if left_pos != -1:
                search_interval.append((left_pos, right_pos))
        # merge the search intervals
        search_interval.sort()
        merge_interval = []
        if len(search_interval) > 0:
            left = -1
            right = -1
            for interval in search_interval:
                l, r = interval
                if r < l:
                    continue
                if left == -1 and right == -1:
                    left = l
                    right = r
                elif l <= right:
                    right = r
                else:
                    merge_interval.append((left, right))
                    left = l
                    right = r
            merge_interval.append((left, right))
        return merge_interval
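
    # Example of the merge step above (hedged, illustrative values): with
    # search_interval = [(3, 10), (8, 15), (20, 25)], the first two ranges overlap and are merged,
    # so the method returns [(3, 15), (20, 25)]; pairs whose right bound is smaller than the left
    # bound (e.g. an unclosed interval recorded as (20, -1)) are skipped.
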
    def find_candidate_images(self):
        candidate_images = set()
        merge_intervals = self.search_interval()
        for interval in merge_intervals:
            start_page, end_page = interval
            if start_page <= self.start_threshold:
                continue
            if end_page == -1:
                end_page = start_page + 20
            candidate_images = self.image_regularization(start_page=max(0, start_page - self.search_threshold), end_page=end_page + self.search_threshold, candidate_images=candidate_images)
        candidate_images = list(candidate_images)
        return candidate_images
    # locate business license images
    def locate_business_license(self):
        '''locate business license and return image'''
        keywords = ["资格审查资料", "其它资格审查材料", "资格审查材料"]
        candidate_pages = []
        center_pages = []
        candidate_images = set()
        # locate in title.json
        for title_block in self.title:
            block_text = title_block['text'].replace(' ', '').strip()
            for keyword in keywords:
                if keyword in block_text:
                    # fuzzy outline-style location first
                    center_page = None
                    if '.' in block_text:
                        center_page = block_text.split('.')[-1]
                        if center_page.isdigit():
                            center_pages.append(int(center_page))
                    candidate_pages.append(title_block['page_number'])
        # locate in outlines.json
        if len(self.outline) > 0:
            for outline_block in self.outline:
                outline_text = outline_block['title'].strip()
                for keyword in keywords:
                    if keyword in outline_text:
                        if outline_block["page_number"] is not None:
                            center_pages.append(outline_block["page_number"])
        # information match
        filter_pages = set()
        if len(center_pages) == 0 and len(candidate_pages) == 0:
            return None
        elif len(center_pages) == 0:
            filter_pages.update(candidate_pages)
        elif len(candidate_pages) == 0:
            filter_pages.update(center_pages)
        else:
            # center_pages act as anchors and are all added
            filter_pages.update(center_pages)
            # candidate_pages are added when they are close enough to a center_page
            for candidate_page in candidate_pages:
                if candidate_page <= self.start_threshold:
                    continue
                for center_page in center_pages:
                    distance = abs(candidate_page - center_page)
                    if distance <= self.distance_threshold:
                        filter_pages.add(min(candidate_page, center_page) + distance // 2)
        # collect the filtered images into candidate_images
        for filter_page in filter_pages:
            candidate_images = self.image_regularization(start_page=max(filter_page - self.search_threshold, 0), end_page=filter_page + self.search_threshold, candidate_images=candidate_images)
        # resolve the final image paths
        candidate_images = list(candidate_images)
        target_list = self.exact_match(candidate_images=candidate_images)
        # return target_path list
        return target_list
    # locate qualification certificates
    def locate_qualtification_certificate(self):
        '''Return the list of qualification certificate images.'''
        # fuzzy location via keywords
        keywords = ['资格审查资料', '资格审查材料', '其它材料', '其他材料', '影印件']
        search_interval = []
        candidate_images = set()
        # locate in title.json
        left_pos = -1   # left pointer
        right_pos = -1  # right pointer
        for title_block in self.title:
            block_text = title_block['text'].replace(' ', '').strip()
            # close the currently open interval first
            if left_pos != -1 and '证书' not in block_text:
                right_pos = title_block['page_number']
                search_interval.append((left_pos, right_pos))
                # reset
                left_pos = -1
            for keyword in keywords:
                if keyword in block_text:
                    # fuzzy outline-style location first
                    center_page = None
                    if '.' in block_text:
                        center_page = block_text.split('.')[-1]
                    if center_page is not None and center_page.isdigit():
                        left_pos = min(title_block['page_number'], int(center_page))
                    else:
                        left_pos = title_block['page_number']
        # final check
        if left_pos != -1:
            search_interval.append((left_pos, right_pos))
        # reset
        left_pos = -1
        right_pos = -1
        # locate in outlines.json
        if len(self.outline) > 0:
            for outline_block in self.outline:
                if left_pos != -1:
                    right_pos = outline_block["page_number"]
                    right_pos = right_pos if right_pos is not None else -1
                    search_interval.append((left_pos, right_pos))
                    left_pos = -1
                outline_text = outline_block['title'].strip()
                for keyword in keywords:
                    if keyword in outline_text:
                        if outline_block["page_number"] is not None:
                            left_pos = outline_block["page_number"]
            # final check
            if left_pos != -1:
                search_interval.append((left_pos, right_pos))
        # merge the search intervals
        search_interval.sort()
        merge_interval = []
        if len(search_interval) > 0:
            left = -1
            right = -1
            for interval in search_interval:
                l, r = interval
                if r < l:
                    continue
                if left == -1 and right == -1:
                    left = l
                    right = r
                elif l <= right:
                    right = r
                else:
                    merge_interval.append((left, right))
                    left = l
                    right = r
            merge_interval.append((left, right))
        for interval in merge_interval:
            start_page, end_page = interval
            if end_page == -1:
                end_page = start_page + 20
            if start_page <= self.start_threshold:
                continue
            candidate_images = self.image_regularization(start_page=max(0, start_page - self.search_threshold), end_page=end_page + self.search_threshold, candidate_images=candidate_images)
        candidate_images = list(candidate_images)
        target_list = self.search_qualtification_certificate(candidate_images=candidate_images)
        return target_list
    # collect images whose filenames match the expected pattern
    def image_regularization(self, start_page: int, end_page: int, candidate_images: set):
        for index in range(start_page, end_page + 1):
            current_format = self.image_format.format(index)
            files = glob.glob(os.path.join(self.image_dir, current_format))
            # cut_files = list(map(lambda x: x.split('/')[-1], files))
            # filter_files = [file for file in cut_files if not file.endswith('.unk')]
            filter_files = [file for file in files if not file.endswith('.unk')]
            candidate_images.update(filter_files)
        return candidate_images
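
    # Example (hedged, hypothetical filenames): for page 12 the pattern becomes "image_page_12*",
    # so the glob returns files such as extracted_images/image_page_12_0.jpg while ".unk" files
    # are skipped; note the trailing "*" also admits longer page numbers (e.g. image_page_120_0.jpg).
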
    def exact_match(self, candidate_images: list):
        '''Exactly match the business license location.'''
        if len(candidate_images) == 0:
            return None
        target_list = []
        sim_list = []
        for image_path in candidate_images:
            score = self.get_similarity(image_path=image_path, tamplate=self.bl_tamplate)
            sim_list.append(score.cpu().numpy())
        # keep images whose similarity exceeds match_threshold
        sim_list = np.array(sim_list).reshape(len(sim_list))
        for i, cos_sim in enumerate(sim_list):
            if cos_sim > self.match_threshold:
                target_list.append(candidate_images[i])
        # no image met the current threshold, so fall back to the lower one
        if len(target_list) == 0:
            for i, cos_sim in enumerate(sim_list):
                if cos_sim > self.degrade_threshold:
                    target_list.append(candidate_images[i])
        return target_list
    def search_qualtification_certificate(self, candidate_images: list):
        '''Search the candidate images for ones that look like qualification certificates.'''
        if len(candidate_images) == 0:
            return None
        target_list = []
        sim_list = []
        for image_path in candidate_images:
            score = self.get_similarity(image_path=image_path, tamplate=self.qc_tamplate)
            sim_list.append(score.cpu().numpy())
        sim_list = np.array(sim_list).reshape(len(sim_list))
        for i, cos_sim in enumerate(sim_list):
            if cos_sim > self.qc_threshold:
                target_list.append(candidate_images[i])
        return target_list
    def get_similarity(self, image_path, tamplate):
        image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
        text = clip.tokenize([tamplate]).to(device)
        with torch.no_grad():
            logits_per_image, logits_per_text = model.get_similarity(image, text)
        return logits_per_image
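

# Minimal sketch of the CLIP scoring used by PdfMatcher (hedged: the prompt and image path are
# illustrative placeholders). cn_clip's get_similarity returns image/text logits, so larger values
# mean a closer match; PdfMatcher compares these logits against match_threshold / degrade_threshold.
def _demo_clip_score(image_path: str = "example_page.jpg", prompt: str = "营业执照") -> float:
    image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
    text = clip.tokenize([prompt]).to(device)
    with torch.no_grad():
        logits_per_image, _ = model.get_similarity(image, text)
    return float(logits_per_image[0, 0])
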
if __name__ == '__main__':
    start_time = time.time()
    url = "http://120.48.103.13:18000/ctr_ocr"
    base_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/seal?access_token="
    access_token = "24.6bbe9987c6bd19ba65e4402917811657.2592000.1724573148.282335-86574608"
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    data_path = "/home/stf/miner_pdf/data/投标公司pdf"
    out_path = "/home/stf/miner_pdf/test.json"
    ground_truth = "/home/stf/miner_pdf/ground_truth.json"
    firm_excel_file = "/home/stf/miner_pdf/data/certificate.xlsx"
    df = pd.read_excel(firm_excel_file)
    ocr = OcrAgent(url=url)
    seal_ocr = seal_agent(base_url=base_url, access_token=access_token, headers=headers)
    unscanned_firm_list = df[(df['是否为扫描件'] == '否')]['公司名称'].tolist()
    scanned_firm_list = df[(df['是否为扫描件'] == '是')]['公司名称'].tolist()
    all_firm_list = unscanned_firm_list + scanned_firm_list
    data = {}
    start_time = time.time()
    # extract the title text from each PDF
    for firm_name in all_firm_list:
        firm_path = os.path.join(data_path, firm_name)
        # create new files under firm_path
        for bid_name in tqdm(os.listdir(firm_path)):
            if bid_name.endswith('.pdf'):
                # bid directory
                bid_dir = os.path.join(firm_path, bid_name[:-4])
                os.makedirs(bid_dir, exist_ok=True)
                # bid directory / image_dir
                image_dir = os.path.join(bid_dir, 'extracted_images')
                os.makedirs(image_dir, exist_ok=True)
                document = os.path.join(firm_path, bid_name)
                logger.info(f'processing document {document} ...')
                # extract the document titles into title_path
                '''
                title_path = os.path.join(bid_dir, 'title.json')
                main_parse(pdf_path=document, title_path=title_path, image_dir=image_dir)
                extractor = PdfExtractAttr(file_path=document)
                extractor.parse_outline(out_path=os.path.join(bid_dir, 'outlines.json'))
                extractor.parse_text(out_path=os.path.join(bid_dir, 'all_texts.json'))
                extractor.parse_table(out_path=os.path.join(bid_dir, 'all_tables.json'))
                '''
    # the following extracted business license / qualification certificate info from scanned firms' PDFs
    # no longer in use
    '''
    for firm_name in scanned_firm_list:
        data[firm_name] = {'license_list': []}
        firm_path = os.path.join(data_path, firm_name)
        # create new files under firm_path
        for bid_name in tqdm(os.listdir(firm_path)):
            if bid_name.endswith('.pdf'):
                # image directory
                image_dir = os.path.join(firm_path, 'scanned')
                document = os.path.join(firm_path, bid_name)
                logger.info(f'processing document {document} ...')
                # count how many scanned images this pdf was converted into
                total_img = len(os.listdir(image_dir))
                logger.info(f'当前文档共扫描出{total_img}张图像 ...')
                for img in os.listdir(image_dir):
                    img_path = os.path.join(image_dir, img)
                    try:
                        response = ocr.judge(image_path=img_path, firm_name=firm_name)
                        if response is None or response['qualtified'] is None:
                            # logger.info(json.dumps(response, indent=4, ensure_ascii=False))
                            continue
                        else:
                            data[firm_name]["license_list"].append({
                                "license_name": response["license_name"],
                                "license_path": img,
                                # "license_page": response["page_number"],
                                "start_datetime": response["start_datetime"],
                                "end_datetime": response["end_datetime"]
                            })
                    except ValueError as e:
                        logger.info(e)
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    '''
    # originally this extracted business license / qualification certificate info only for
    # non-scanned firms; it now runs for all firms
    # for firm_name in unscanned_firm_list:
    for firm_name in all_firm_list:
        firm_path = os.path.join(data_path, firm_name)
        # create new files under firm_path
        for bid_name in tqdm(os.listdir(firm_path)):
            if bid_name.endswith('.pdf'):
                # bid directory
                bid_dir = os.path.join(firm_path, bid_name[:-4])
                os.makedirs(bid_dir, exist_ok=True)
                # bid directory / image_dir
                image_dir = os.path.join(bid_dir, 'extracted_images')
                os.makedirs(image_dir, exist_ok=True)
                document = os.path.join(firm_path, bid_name)
                logger.info(f'processing document {document} ...')
                agent = PdfMatcher(document)
                data[firm_name] = {"license_list": []}
                candidate_images = agent.find_candidate_images()
                if len(candidate_images) == 0:
                    logger.info(f'current firm: {firm_name} is unqualtified ...')
                else:
                    for img in candidate_images:
                        try:
                            response = ocr.judge(image_path=img, firm_name=firm_name)
                            if response is None or response['qualtified'] is None:
                                # logger.info(json.dumps(response, indent=4, ensure_ascii=False))
                                continue
                            else:
                                data[firm_name]["license_list"].append({
                                    "license_name": response["license_name"],
                                    "license_path": img,
                                    # judge() does not currently return page_number, so use .get
                                    "license_page": response.get("page_number"),
                                    "start_datetime": response["start_datetime"],
                                    "end_datetime": response["end_datetime"]
                                })
                        except ValueError as e:
                            logger.info(e)
                '''
                result = agent.locate_business_license()
                media_time = time.time()
                logger.info(f'search cost time: {media_time - start_time}')
                if result is None:
                    logger.info(f'current firm {firm_name} detects None ...')
                else:
                    for tgt_path in result:
                        response = ocr.get_license_info(image_path=tgt_path, firm_name=firm_name)
                        # TODO: handle old vs. new business license formats
                        if (response['qualtified'] == True) and (response['matched'] == True):
                            break
                logger.info(f'detect cost time: {time.time() - media_time}')
                # extract images into image_dir
                # extract the document titles into title_path
                # title_path = os.path.join(bid_dir, 'title.json')
                # main_parse(pdf_path=document, title_path=title_path, image_dir=image_dir)
                # extractor = PdfExtractAttr(file_path=document)
                # extractor.parse_outline(out_path=os.path.join(bid_dir, 'outlines.json'))
                # extractor.parse_text(out_path=os.path.join(bid_dir, 'all_texts.json'))
                # extractor.parse_table(out_path=os.path.join(bid_dir, 'all_tables.json'))
                # df.to_excel(firm_excel_file, index=False)
                '''
    # write data to out_path
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    # accuracy test against the ground truth
    business_lic_num = 0
    certificate_lic_num = 0
    business_lic_cnt = 0
    certificate_lic_cnt = 0
    true_data = load_json(ground_truth)
    for firm_name in true_data:
        y_data = true_data[firm_name]['license_list']
        pred_data = data[firm_name]['license_list']
        for lic in y_data:
            if lic['license_name'] == '营业执照':
                business_lic_num += 1
                for i in range(len(pred_data)):
                    if pred_data[i]['license_name'] == '营业执照' and pred_data[i]['license_path'] == lic['license_path']:
                        business_lic_cnt += 1
            elif lic['license_name'] == '资质证书':
                certificate_lic_num += 1
                for i in range(len(pred_data)):
                    if pred_data[i]['license_path'] == lic['license_path']:
                        certificate_lic_cnt += 1
            else:
                logger.info('wrong truth data of {}'.format(lic['license_path']))
    logger.info(f'营业执照识别准确率为:{business_lic_cnt / business_lic_num}')
    logger.info(f'资质证书识别率为:{certificate_lic_cnt / certificate_lic_num}')
    delta = time.time() - start_time
    logger.info(f"total unscanned document {len(unscanned_firm_list)} cost time: {delta // 60} min {delta % 60} sec ...")