# text_extractor.py
  1. from pdfminer.high_level import extract_pages
  2. from pdfminer.layout import LTTextBoxHorizontal
  3. from pdfminer.pdfinterp import resolve1
  4. from pdfminer.pdfdocument import PDFDocument
  5. from pdfminer.pdfparser import PDFParser
  6. from matcher import Matcher
  7. from get_info import PdfExtractAttr, is_title
  8. from typing import Callable, Union, List, Tuple, Dict
  9. from re import fullmatch
  10. from tqdm import tqdm
  11. import pandas as pd
  12. def absolute_not_title(line: str) -> bool:
  13. if fullmatch(r'^\d(\d*\.?\d*)+\d(%)?', line):
  14. return True
  15. else:
  16. return False
  17. def parse_title(pdf_path: str) -> list[dict[str, int | str | tuple[float, float, float, float]]]:
  18. texts = []
  19. for page_number, page_layout in tqdm(enumerate(extract_pages(pdf_path)),
  20. total=resolve1(PDFDocument(
  21. PDFParser(open(pdf_path, 'rb'))).catalog['Pages'])['Count']
  22. ):
  23. title_index = 0
  24. for element in page_layout:
  25. if isinstance(element, LTTextBoxHorizontal) and len(element._objs) == 1:
  26. text = element.get_text().strip()
  27. if text and (is_title(text) or element.height > 15) and (not absolute_not_title(text)):
  28. texts.append({'index': title_index, 'page_number': page_number, 'bbox': element.bbox, 'text': text})
  29. title_index += 1
  30. results = []
  31. for i, text in enumerate(texts):
  32. results.append({'title': text['text'],
  33. 'index': text['index'],
  34. 'page_number': text['page_number'],
  35. 'seq_num': i
  36. })
  37. return results
  38. def pagination_texts(contents: List[dict], start: int, end: int = None) -> Tuple[Dict, List[str]]:
  39. if end is None:
  40. end = start + 1
  41. results = {}
  42. texts = []
  43. pages = set(range(start, end))
  44. for page in contents:
  45. if page['page_number'] in pages:
  46. results.get(int(page['page_number']), {}).update(
  47. {
  48. page['index']: {
  49. 'page_number': page['page_number'],
  50. 'index': page['index'],
  51. 'text': page['text'],
  52. 'lines': page['lines'],
  53. 'is_table_name': page['is_table_name']
  54. }
  55. })
  56. texts.append(page['text'])
  57. return results, texts
  58. def similarity_filter(data: List[dict], expect_similarity: float = None):
  59. def f(x: dict):
  60. return x['相似度'] > (expect_similarity if isinstance(expect_similarity, float) else 0.5)
  61. return filter(f, data)
def extract_from_texts(text: List[str], extractor: Union[Callable[[str, float], List[str]], Callable[[str], List[str]]],
                       instances: List[str], similarity: float = None) -> Tuple[List[str], List[int]]:
    """Run *extractor* over the sentences most similar to *instances*.

    Pipeline: strip every input string, drop all spaces, concatenate,
    split on the Chinese full stop '。', drop empty sentences, then
    re-split on ',' to get the final sentence list. Each sentence is
    similarity-matched against *instances* via similar_match; the
    extractor is applied to the best-matching sentences.

    Args:
        text: raw text fragments (e.g. one entry per page/line).
        extractor: callable taking a sentence (and optionally a
            similarity threshold) and returning a list of matches.
        instances: keyword phrases to match sentences against.
        similarity: if given, forwarded as the extractor's second
            argument; otherwise the single-argument form is used.

    Returns:
        (extractions, similarities) — non-empty extractor results and
        the per-sentence similarity scores from similar_match.
    """
    # Normalize: strip, remove interior spaces, join everything, split on
    # '。', discard empties, then split the comma-joined result back into
    # individual sentences.
    texts = ','.join(filter(lambda x: x != '',
                            ''.join([''.join(filter(lambda x: x != ' ', list(i.strip()))) for i in text]).split(
                                '。'))).split(',')
    sims = similar_match([{'text': i} for i in texts], instances, 'text')
    s_texts = [i['text'] for i in sims]
    similarities = [i['相似度'] for i in sims]
    # Drop sentences where the extractor found nothing (empty list).
    if similarity is None:
        return list(filter(lambda x: x != [], [extractor(i) for i in s_texts])), similarities
    else:
        return list(filter(lambda x: x != [], [extractor(i, similarity) for i in s_texts])), similarities
  74. def similar_match(data: List[dict], instances: List[str], key: str) -> {}:
  75. matcher = Matcher()
  76. df = pd.DataFrame(data)
  77. keyword_embeddings = matcher.get_embeddings(instances)
  78. tqdm.pandas(desc='标题相似度匹配')
  79. result = df[key].apply(lambda x: matcher.TopK1(x, instances, matcher.get_embedding(x), keyword_embeddings))
  80. result.columns = ['因素', '相似度']
  81. df['因素'] = result['因素']
  82. df['相似度'] = result['相似度']
  83. max_sim_idx = df.groupby('因素')['相似度'].idxmax()
  84. max_sim_rows = df.loc[max_sim_idx]
  85. return max_sim_rows.to_dict(orient='records')
def get_instance(title_instances: List[str], content_instances: List[str], pdf: str,
                 extractor: Union[Callable[[str, float], List[str]], Callable[[str], List[str]]],
                 page_bias: int = 1, similarity: float = None):
    """Locate sections by title similarity, then extract values from them.

    Parses the PDF's titles, keeps those similar to *title_instances*
    (above *similarity*, default 0.5), and for each hit runs *extractor*
    over the text of that page plus the next ``page_bias - 1`` pages.

    Args:
        title_instances: keyword phrases that identify target sections.
        content_instances: keyword phrases passed to extract_from_texts.
        pdf: path to the PDF file.
        extractor: value-extraction callable (see extract_from_texts).
        page_bias: how many pages of text to read from each matched
            title's page onward.
        similarity: threshold for similarity_filter; non-float -> 0.5.

    Returns:
        Accumulated extraction results across all matched titles.
    """
    file = PdfExtractAttr(file_path=pdf)
    # titles = file.parse_outline()
    titles = parse_title(pdf)
    texts = file.parse_text()
    title_sims = similarity_filter(similar_match(titles, title_instances, key='title'), similarity)
    results = []
    for i in title_sims:
        current_page = i['page_number']
        _, text = pagination_texts(texts, current_page, current_page + page_bias)
        # NOTE(review): extract_from_texts returns a (matches, similarities)
        # tuple, so extend() appends BOTH elements to results — confirm this
        # flattening is intended rather than results.append(...) or unpacking.
        results.extend(extract_from_texts(text, extractor, content_instances))
    return results
if __name__ == '__main__':
    # Disabled manual smoke tests: each get_instance call extracted one
    # bid-document field (total price in words/digits, construction
    # duration, quality grade) from a local PDF, and the final checks
    # validated the words-to-digits price conversion. Kept for reference.
    # price_zhs = get_instance(['投标函', '开标一览表'], ['人民币投标总报价'],
    # '/Users/zelate/Codes/pvas/pdf_title_image/投标文件-修改版9-5-1-1.pdf',
    # match_price_zhs)
    # price_num = get_instance(['投标函', '开标一览表'], ['人民币投标总报价'],
    # '/Users/zelate/Codes/pvas/pdf_title_image/投标文件-修改版9-5-1-1.pdf',
    # match_price_num)
    # duration = get_instance(['投标函', '开标一览表'], ['工期日历天'],
    # '/Users/zelate/Codes/pvas/pdf_title_image/投标文件-修改版9-5-1-1.pdf',
    # match_duration)
    # quality = get_instance(['投标函', '开标一览表'], ['工程质量'],
    # '/Users/zelate/Codes/pvas/pdf_title_image/投标文件-修改版9-5-1-1.pdf',
    # match_quality)
    # valid = rmb_to_digit(price_zhs[0][0][0]) == price_num[0][0][0][1:]
    # test = rmb_to_digit('壹仟肆佰贰拾万捌仟玖佰陆拾柒元叁角陆分元')
    # valid = (rmb_to_digit('壹仟肆佰贰拾万捌仟玖佰陆拾柒元叁角陆分元')) == '14208967.36'
    pass