# -*- coding: utf-8 -*-
# @Author: privacy
# @Date: 2024-08-30 11:17:21
# @Last Modified by: privacy
# @Last Modified time: 2024-09-03 10:23:35
from typing import Callable, Union, List, Tuple, Dict, Optional
import pandas as pd
from tqdm import tqdm
from celery_tasks.matcher import Matcher
def pagination_texts(contents: List[dict], start: int, end: int = None) -> Tuple[Dict, List[str]]:
    """Collect page elements whose page number falls in [start, end).

    Args:
        contents: flat list of page-element dicts; each must carry the keys
            'page_number', 'index', 'text', 'lines' and 'is_table_name'.
        start: first page number to include.
        end: one past the last page number to include; defaults to
            start + 1 (i.e. a single page).

    Returns:
        A (results, texts) tuple where results maps
        page_number -> {index: element-dict} and texts is the flat list of
        matching 'text' values in input order.
    """
    if end is None:
        end = start + 1
    results: Dict[int, Dict] = {}
    texts: List[str] = []
    pages = set(range(start, end))
    for page in contents:
        if page['page_number'] in pages:
            # BUG FIX: the original used results.get(key, {}).update(...),
            # which mutated a throwaway empty dict whenever the key was
            # absent, so `results` was always returned empty. setdefault
            # stores the per-page dict in `results` before updating it.
            results.setdefault(int(page['page_number']), {})[page['index']] = {
                'page_number': page['page_number'],
                'index': page['index'],
                'text': page['text'],
                'lines': page['lines'],
                'is_table_name': page['is_table_name']
            }
            texts.append(page['text'])
    return results, texts
def similarity_filter(data: List[dict], expect_similarity: float = None):
    """Lazily keep entries whose '相似度' (similarity) exceeds a threshold.

    Args:
        data: dicts each carrying a numeric '相似度' value.
        expect_similarity: cut-off; anything that is not a float
            (including None) falls back to the default 0.5.

    Returns:
        A filter iterator over the surviving entries.
    """
    threshold = expect_similarity if isinstance(expect_similarity, float) else 0.5
    return filter(lambda item: item['相似度'] > threshold, data)
def extract_from_texts(text: List[str], extractor: Union[Callable[[str, float], List[str]], Callable[[str], List[str]]],
                       instances: List[str], similarity: float = None) -> Tuple[List[str], List[int]]:
    """Clean raw page text, pick the best-matching sentences, run the extractor.

    Args:
        text: raw text fragments (one per page element).
        extractor: callable applied per matched sentence; invoked as
            extractor(sentence) or extractor(sentence, similarity).
        instances: candidate keyword strings for similarity matching.
        similarity: optional threshold forwarded to the extractor.

    Returns:
        (extractions, similarities) — non-empty extractor outputs and the
        per-sentence similarity scores from similar_match.
    """
    # Strip whitespace per fragment, drop inner spaces, then split the
    # concatenated text first on '。' and finally on ','.
    cleaned = ''.join(
        ''.join(ch for ch in fragment.strip() if ch != ' ') for fragment in text
    )
    sentences = ','.join(seg for seg in cleaned.split('。') if seg != '').split(',')
    sims = similar_match([{'text': s} for s in sentences], instances, 'text')
    matched = [row['text'] for row in sims]
    similarities = [row['相似度'] for row in sims]
    if similarity is None:
        extracted = [extractor(sentence) for sentence in matched]
    else:
        extracted = [extractor(sentence, similarity) for sentence in matched]
    return [item for item in extracted if item != []], similarities
def similar_match(data: List[dict], instances: List[str], key: str) -> List[dict]:
    """For each matched factor, keep the input row with the highest similarity.

    Args:
        data: list of dicts; column `key` holds the text to match.
        instances: candidate keyword strings to match against.
        key: name of the column in `data` containing the text.

    Returns:
        One record (dict) per distinct '因素' (factor): the row whose
        '相似度' (similarity) is maximal for that factor.
    """
    matcher = Matcher()
    df = pd.DataFrame(data)
    # Embed all candidate keywords once; each row's text is embedded below.
    keyword_embeddings = matcher.get_embeddings(instances)
    # NOTE(review): tqdm.pandas() registers progress_apply, but plain .apply
    # is used below, so no progress bar is actually shown — confirm intent.
    tqdm.pandas(desc='标题相似度匹配')
    result = df[key].apply(lambda x: matcher.TopK1(x, instances, matcher.get_embedding(x), keyword_embeddings))
    # NOTE(review): `result` is produced by Series.apply; assigning .columns
    # and indexing with '因素'/'相似度' only works if TopK1 yields a labeled
    # structure — verify against Matcher.TopK1's return type.
    result.columns = ['因素', '相似度']
    df['因素'] = result['因素']
    df['相似度'] = result['相似度']
    # Keep, per factor, the single row with the highest similarity score.
    max_sim_idx = df.groupby('因素')['相似度'].idxmax()
    max_sim_rows = df.loc[max_sim_idx]
    return max_sim_rows.to_dict(orient='records')
def get_instance(title_instances: List[str],
                 content_instances: List[str],
                 extractor: Union[Callable[[str, float], List[str]], Callable[[str], List[str]]],
                 titles_list: Optional[list] = None,
                 texts_list: Optional[list] = None,
                 pdf_path: Optional[str] = None,
                 page_bias: int = 1,
                 similarity: float = None):
    """Select pages whose titles match, then extract instances from their text.

    Args:
        title_instances: candidate title keywords used to select pages.
        content_instances: candidate content keywords passed to the
            extraction step.
        extractor: callable applied per candidate sentence; invoked as
            extractor(text) or extractor(text, similarity).
        titles_list: title dicts (must carry 'title' and 'page_number' keys).
        texts_list: page-element dicts consumed by pagination_texts.
        pdf_path: unused in this function — TODO confirm whether it can be
            removed or is reserved for a future code path.
        page_bias: how many pages past each matched title page to scan.
        similarity: threshold forwarded to similarity_filter.

    Returns:
        results: accumulated extraction output; see NOTE below on its shape.
    """
    title_sims = similarity_filter(
        similar_match(
            titles_list,
            title_instances,
            key='title'
        ),
        similarity
    )
    results = []
    for i in title_sims:
        current_page = i['page_number']
        # Gather raw text from the matched page through page_bias pages ahead.
        _, text = pagination_texts(texts_list, current_page, current_page + page_bias)
        # NOTE(review): extract_from_texts returns an (extractions,
        # similarities) tuple, so extend() appends BOTH lists here — confirm
        # this interleaving is intended rather than
        # results.extend(extract_from_texts(...)[0]).
        results.extend(extract_from_texts(text, extractor, content_instances))
    return results