Commit 1b9d65b3 authored by 赵小蒙

1. Add a leading-underscore prefix to the Trace class keys

2. Implement UNIPipe
parent 877160a7
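
The first change renames the pipeline's internal bookkeeping keys (need_drop, drop_reason, exception, parse_type) to underscore-prefixed forms so they stand apart from payload fields. A minimal before/after sketch of the rename, mirroring the diff below:

    # before this commit
    jso["need_drop"] = True
    jso["drop_reason"] = DropReason.ENCRYPTED

    # after this commit: internal trace keys carry a leading underscore
    jso["_need_drop"] = True
    jso["_drop_reason"] = DropReason.ENCRYPTED

The second change adds the UNIPipe class (the new file near the end of this diff), which bundles pdf classification, parsing, and universal-format conversion behind a single interface.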
@@ -67,9 +67,7 @@ def demo_classify_by_type(book_name=None, debug_mode=True):
    img_num_list = pdf_meta["imgs_per_page"]
    text_len_list = pdf_meta["text_len_per_page"]
    text_layout_list = pdf_meta["text_layout_per_page"]
-    pdf_path = json_object.get("file_location")
    is_text_pdf, results = classify(
-        pdf_path,
        total_page,
        page_width,
        page_height,
@@ -89,7 +87,7 @@ def demo_meta_scan(book_name=None, debug_mode=True):
    s3_pdf_path = json_object.get("file_location")
    s3_config = get_s3_config_dict(s3_pdf_path)
    pdf_bytes = read_file(s3_pdf_path, s3_config)
-    res = pdf_meta_scan(s3_pdf_path, pdf_bytes)
+    res = pdf_meta_scan(pdf_bytes)
    logger.info(json.dumps(res, ensure_ascii=False))
    write_json_to_local(res, book_name)
...
@@ -2,6 +2,7 @@ import math
from loguru import logger
from magic_pdf.libs.boxbase import find_bottom_nearest_text_bbox, find_top_nearest_text_bbox
+from magic_pdf.libs.commons import join_path
from magic_pdf.libs.ocr_content_type import ContentType

TYPE_INLINE_EQUATION = ContentType.InlineEquation
@@ -227,7 +228,7 @@ def __insert_before_para(text, type, element, content_list):
        logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file, search target is {text}")

-def mk_universal_format(para_dict: dict):
+def mk_universal_format(para_dict: dict, img_buket_path):
    """
    Build the universal output format. https://aicarrier.feishu.cn/wiki/FqmMwcH69iIdCWkkyjvcDwNUnTY
    """
@@ -249,7 +250,7 @@ def mk_universal_format(para_dict: dict):
    for img in all_page_images:
        content_node = {
            "type": "image",
-            "img_path": img['image_path'],
+            "img_path": join_path(img_buket_path, img['image_path']),
            "img_alt": "",
            "img_title": "",
            "img_caption": ""
@@ -258,7 +259,7 @@ def mk_universal_format(para_dict: dict):
    for table in all_page_tables:
        content_node = {
            "type": "table",
-            "img_path": table['image_path'],
+            "img_path": join_path(img_buket_path, table['image_path']),
            "table_latex": table.get("text"),
            "table_title": "",
            "table_caption": "",
...
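
With the new img_buket_path parameter, image paths in the universal format now carry the bucket prefix instead of being book-relative. A small illustration, assuming join_path joins its arguments with "/" (the values below are hypothetical):

    img_buket_path = "s3://llm-pdf/images"               # hypothetical bucket prefix
    img = {"image_path": "book_0001/page_3_img_1.jpg"}   # hypothetical relative path
    join_path(img_buket_path, img["image_path"])
    # -> "s3://llm-pdf/images/book_0001/page_3_img_1.jpg"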
+from magic_pdf.libs.commons import join_path
from magic_pdf.libs.language import detect_lang
from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
from magic_pdf.libs.ocr_content_type import ContentType
@@ -137,10 +138,10 @@ def ocr_mk_markdown_with_para_core(paras_of_layout, mode):
    return page_markdown

-def para_to_standard_format(para):
+def para_to_standard_format(para, img_buket_path):
    para_content = {}
    if len(para) == 1:
-        para_content = line_to_standard_format(para[0])
+        para_content = line_to_standard_format(para[0], img_buket_path)
    elif len(para) > 1:
        para_text = ''
        inline_equation_num = 0
@@ -170,7 +171,7 @@ def para_to_standard_format(para):
    }
    return para_content

-def make_standard_format_with_para(pdf_info_dict: dict):
+def make_standard_format_with_para(pdf_info_dict: dict, img_buket_path: str):
    content_list = []
    for _, page_info in pdf_info_dict.items():
        paras_of_layout = page_info.get("para_blocks")
@@ -178,12 +179,12 @@ def make_standard_format_with_para(pdf_info_dict: dict):
            continue
        for paras in paras_of_layout:
            for para in paras:
-                para_content = para_to_standard_format(para)
+                para_content = para_to_standard_format(para, img_buket_path)
                content_list.append(para_content)
    return content_list

-def line_to_standard_format(line):
+def line_to_standard_format(line, img_buket_path):
    line_text = ""
    inline_equation_num = 0
    for span in line['spans']:
@@ -194,13 +195,13 @@ def line_to_standard_format(line):
        if span['type'] == ContentType.Image:
            content = {
                'type': 'image',
-                'img_path': span['image_path']
+                'img_path': join_path(img_buket_path, span['image_path'])
            }
            return content
        elif span['type'] == ContentType.Table:
            content = {
                'type': 'table',
-                'img_path': span['image_path']
+                'img_path': join_path(img_buket_path, span['image_path'])
            }
            return content
        else:
...
@@ -15,6 +15,7 @@ from collections import Counter

import click
import numpy as np
+from loguru import logger

from magic_pdf.libs.commons import mymax, get_top_percent_list
from magic_pdf.filter.pdf_meta_scan import scan_max_page, junk_limit_min
@@ -298,7 +299,7 @@ def classify_by_img_narrow_strips(page_width, page_height, img_sz_list):
    return narrow_strip_pages_ratio < 0.5

-def classify(pdf_path, total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list, text_layout_list: list):
+def classify(total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list, text_layout_list: list):
    """
    Image and page dimensions here are in pts.
    :param total_page:
@@ -323,7 +324,7 @@ def classify(total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list, text_layout_list: list):
    elif not any(results.values()):
        return False, results
    else:
-        print(f"WARNING: {pdf_path} is not classified by area and text_len, by_image_area: {results['by_image_area']}, by_text: {results['by_text_len']}, by_avg_words: {results['by_avg_words']}, by_img_num: {results['by_img_num']}, by_text_layout: {results['by_text_layout']}, by_img_narrow_strips: {results['by_img_narrow_strips']}", file=sys.stderr)  # this case helps quickly surface unusual pdfs so the classification algorithm can be tuned for them
+        logger.warning(f"pdf is not classified by area and text_len, by_image_area: {results['by_image_area']}, by_text: {results['by_text_len']}, by_avg_words: {results['by_avg_words']}, by_img_num: {results['by_img_num']}, by_text_layout: {results['by_text_layout']}, by_img_narrow_strips: {results['by_img_narrow_strips']}", file=sys.stderr)  # this case helps quickly surface unusual pdfs so the classification algorithm can be tuned for them
        return False, results
@@ -350,7 +351,7 @@ def main(json_file):
        is_needs_password = o['is_needs_password']
        if is_encrypted or total_page == 0 or is_needs_password:  # encrypted, password-protected, and zero-page pdfs are all skipped
            continue
-        tag = classify(pdf_path, total_page, page_width, page_height, img_sz_list, text_len_list, text_layout_list)
+        tag = classify(total_page, page_width, page_height, img_sz_list, text_len_list, text_layout_list)
        o['is_text_pdf'] = tag
        print(json.dumps(o, ensure_ascii=False))
    except Exception as e:
...
@@ -287,7 +287,7 @@ def get_language(doc: fitz.Document):
    return language

-def pdf_meta_scan(s3_pdf_path: str, pdf_bytes: bytes):
+def pdf_meta_scan(pdf_bytes: bytes):
    """
    :param s3_pdf_path:
    :param pdf_bytes: binary content of the pdf file
@@ -298,8 +298,8 @@ def pdf_meta_scan(pdf_bytes: bytes):
    is_encrypted = doc.is_encrypted
    total_page = len(doc)
    if total_page == 0:
-        logger.warning(f"drop this pdf: {s3_pdf_path}, drop_reason: {DropReason.EMPTY_PDF}")
-        result = {"need_drop": True, "drop_reason": DropReason.EMPTY_PDF}
+        logger.warning(f"drop this pdf, drop_reason: {DropReason.EMPTY_PDF}")
+        result = {"_need_drop": True, "_drop_reason": DropReason.EMPTY_PDF}
        return result
    else:
        page_width_pts, page_height_pts = get_pdf_page_size_pts(doc)
@@ -322,7 +322,6 @@ def pdf_meta_scan(pdf_bytes: bytes):
    # finally emit one json record
    res = {
-        "pdf_path": s3_pdf_path,
        "is_needs_password": is_needs_password,
        "is_encrypted": is_encrypted,
        "total_page": total_page,
@@ -350,7 +349,7 @@ def main(s3_pdf_path: str, s3_profile: str):
    """
    try:
        file_content = read_file(s3_pdf_path, s3_profile)
-        pdf_meta_scan(s3_pdf_path, file_content)
+        pdf_meta_scan(file_content)
    except Exception as e:
        print(f"ERROR: {s3_pdf_path}, {e}", file=sys.stderr)
        logger.exception(e)
...
@@ -8,7 +8,7 @@ class DropReason:
    HIGH_COMPUTATIONAL_lOAD_BY_SVGS = "high_computational_load_by_svgs"  # special svg images whose processing cost is too high, so the pdf is dropped
    HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES = "high_computational_load_by_total_pages"  # processing cost exceeds the budget of the current method
    MISS_DOC_LAYOUT_RESULT = "missing doc_layout_result"  # layout analysis failed
-    Exception = "exception"  # an exception occurred during parsing
+    Exception = "_exception"  # an exception occurred during parsing
    ENCRYPTED = "encrypted"  # the pdf is encrypted
    EMPTY_PDF = "total_page=0"  # the pdf has zero pages
    NOT_IS_TEXT_PDF = "not_is_text_pdf"  # not a text pdf, cannot be parsed directly
...
@@ -107,7 +107,7 @@ def parse_pdf_by_txt(
    # drop the dependency on junk images to simplify the logic
    if len(page_imgs) > 1500:  # if the current page has more than 1500 images, skip it outright
        logger.warning(f"page_id: {page_id}, img_counts: {len(page_imgs)}, drop this pdf")
-        result = {"need_drop": True, "drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}
+        result = {"_need_drop": True, "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}
        if not debug_mode:
            return result
@@ -236,7 +236,7 @@ def parse_pdf_by_txt(
    if is_text_block_horz_overlap:
        # debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in remain_text_blocks], [], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 0)
        logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}")
-        result = {"need_drop": True, "drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP}
+        result = {"_need_drop": True, "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP}
        if not debug_mode:
            return result
@@ -255,14 +255,14 @@ def parse_pdf_by_txt(
    if len(remain_text_blocks) > 0 and len(all_bboxes) > 0 and len(layout_bboxes) == 0:
        logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}")
-        result = {"need_drop": True, "drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}
+        result = {"_need_drop": True, "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}
        if not debug_mode:
            return result

    """Below, drop complicated layouts and layouts with more than two columns."""
    if any([lay["layout_label"] == LAYOUT_UNPROC for lay in layout_bboxes]):  # complicated layout
        logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.COMPLICATED_LAYOUT}")
-        result = {"need_drop": True, "drop_reason": DropReason.COMPLICATED_LAYOUT}
+        result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
        if not debug_mode:
            return result
@@ -270,8 +270,8 @@ def parse_pdf_by_txt(
    if layout_column_width > 2:  # drop pdfs whose layout has more than two columns
        logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}")
        result = {
-            "need_drop": True,
-            "drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
+            "_need_drop": True,
+            "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
            "extra_info": {"column_cnt": layout_column_width},
        }
        if not debug_mode:
@@ -377,23 +377,23 @@ def parse_pdf_by_txt(
        logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {error_info}")
    if error_info == denseSingleLineBlockException_msg:
        logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}")
-        result = {"need_drop": True, "drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK}
+        result = {"_need_drop": True, "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK}
        return result
    if error_info == titleDetectionException_msg:
        logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_DETECTION_FAILED}")
-        result = {"need_drop": True, "drop_reason": DropReason.TITLE_DETECTION_FAILED}
+        result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_DETECTION_FAILED}
        return result
    elif error_info == titleLevelException_msg:
        logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_LEVEL_FAILED}")
-        result = {"need_drop": True, "drop_reason": DropReason.TITLE_LEVEL_FAILED}
+        result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
        return result
    elif error_info == paraSplitException_msg:
        logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_SPLIT_FAILED}")
-        result = {"need_drop": True, "drop_reason": DropReason.PARA_SPLIT_FAILED}
+        result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
        return result
    elif error_info == paraMergeException_msg:
        logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_MERGE_FAILED}")
-        result = {"need_drop": True, "drop_reason": DropReason.PARA_MERGE_FAILED}
+        result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
        return result
    pdf_info_dict, error_info = para_process_pipeline.para_process_pipeline(pdf_info_dict)
...
@@ -183,8 +183,8 @@ def parse_pdf_for_train(
            f"page_id: {page_id}, img_counts: {img_counts}, drop this pdf: {book_name}, drop_reason: {DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}"
        )
        result = {
-            "need_drop": True,
-            "drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS,
+            "_need_drop": True,
+            "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS,
        }
        if not debug_mode:
            return result
@@ -396,8 +396,8 @@ def parse_pdf_for_train(
            f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}"
        )
        result = {
-            "need_drop": True,
-            "drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP,
+            "_need_drop": True,
+            "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP,
        }
        if not debug_mode:
            return result
@@ -443,8 +443,8 @@ def parse_pdf_for_train(
            f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}"
        )
        result = {
-            "need_drop": True,
-            "drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT,
+            "_need_drop": True,
+            "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT,
        }
        if not debug_mode:
            return result
@@ -456,7 +456,7 @@ def parse_pdf_for_train(
        logger.warning(
            f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.COMPLICATED_LAYOUT}"
        )
-        result = {"need_drop": True, "drop_reason": DropReason.COMPLICATED_LAYOUT}
+        result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
        if not debug_mode:
            return result
@@ -466,8 +466,8 @@ def parse_pdf_for_train(
            f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}"
        )
        result = {
-            "need_drop": True,
-            "drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
+            "_need_drop": True,
+            "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
            "extra_info": {"column_cnt": layout_column_width},
        }
        if not debug_mode:
@@ -616,8 +616,8 @@ def parse_pdf_for_train(
            f"Drop this pdf: {book_name}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}"
        )
        result = {
-            "need_drop": True,
-            "drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK,
+            "_need_drop": True,
+            "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK,
        }
        return result
    if error_info == titleDetectionException_msg:
@@ -625,27 +625,27 @@ def parse_pdf_for_train(
            f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_DETECTION_FAILED}"
        )
        result = {
-            "need_drop": True,
-            "drop_reason": DropReason.TITLE_DETECTION_FAILED,
+            "_need_drop": True,
+            "_drop_reason": DropReason.TITLE_DETECTION_FAILED,
        }
        return result
    elif error_info == titleLevelException_msg:
        logger.warning(
            f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_LEVEL_FAILED}"
        )
-        result = {"need_drop": True, "drop_reason": DropReason.TITLE_LEVEL_FAILED}
+        result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
        return result
    elif error_info == paraSplitException_msg:
        logger.warning(
            f"Drop this pdf: {book_name}, reason: {DropReason.PARA_SPLIT_FAILED}"
        )
-        result = {"need_drop": True, "drop_reason": DropReason.PARA_SPLIT_FAILED}
+        result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
        return result
    elif error_info == paraMergeException_msg:
        logger.warning(
            f"Drop this pdf: {book_name}, reason: {DropReason.PARA_MERGE_FAILED}"
        )
-        result = {"need_drop": True, "drop_reason": DropReason.PARA_MERGE_FAILED}
+        result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
        return result
    if debug_mode:
...
@@ -32,8 +32,8 @@ def meta_scan(jso: dict, doc_layout_check=True) -> dict:
        if (
            "doc_layout_result" not in jso
        ):  # check that the json carries model data; skip this pdf if it does not
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
            return jso
    try:
        data_source = get_data_source(jso)
@@ -58,10 +58,10 @@ def meta_scan(jso: dict, doc_layout_check=True) -> dict:
        start_time = time.time()  # record the start time
        res = pdf_meta_scan(s3_pdf_path, file_content)
        if res.get(
-            "need_drop", False
+            "_need_drop", False
        ):  # if the returned dict carries need_drop, extract drop_reason and skip this parse
-            jso["need_drop"] = True
-            jso["drop_reason"] = res["drop_reason"]
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = res["_drop_reason"]
        else:  # normal return
            jso["pdf_meta"] = res
            jso["content"] = ""
@@ -85,7 +85,7 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            return jso
    # start of the main logic
    try:
@@ -113,8 +113,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
        if (
            is_encrypted or is_needs_password
        ):  # encrypted, password-protected, and zero-page pdfs are all skipped
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.ENCRYPTED
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.ENCRYPTED
        else:
            start_time = time.time()  # record the start time
            is_text_pdf, results = classify(
@@ -139,8 +139,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
                if (
                    text_language not in allow_language
                ):  # drop if the language is not among the allowed ones
-                    jso["need_drop"] = True
-                    jso["drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
+                    jso["_need_drop"] = True
+                    jso["_drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
                    return jso
            else:
                # don't drop for now
@@ -148,8 +148,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
                jso["_pdf_type"] = "OCR"
                jso["pdf_meta"] = pdf_meta
                jso["classify_time"] = classify_time
-                # jso["need_drop"] = True
-                # jso["drop_reason"] = DropReason.NOT_IS_TEXT_PDF
+                # jso["_need_drop"] = True
+                # jso["_drop_reason"] = DropReason.NOT_IS_TEXT_PDF
                extra_info = {"classify_rules": []}
                for condition, result in results.items():
                    if not result:
@@ -162,7 +162,7 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:

def drop_needdrop_pdf(jso: dict) -> dict:
-    if jso.get("need_drop", False):
+    if jso.get("_need_drop", False):
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']} need drop",
            file=sys.stderr,
@@ -176,7 +176,7 @@ def pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
@@ -203,7 +203,7 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            return jso
    # start of the main logic
    s3_pdf_path = jso.get("file_location")
@@ -220,8 +220,8 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
        svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
        max_svgs = max(svgs_per_page_list)
        if max_svgs > 3000:
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
        else:
            try:
                save_path = s3_image_save_path
@@ -244,10 +244,10 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
                    debug_mode=debug_mode,
                )
                if pdf_info_dict.get(
-                    "need_drop", False
+                    "_need_drop", False
                ):  # if the returned dict carries need_drop, extract drop_reason and skip this parse
-                    jso["need_drop"] = True
-                    jso["drop_reason"] = pdf_info_dict["drop_reason"]
+                    jso["_need_drop"] = True
+                    jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
                else:  # normal return: compress pdf_info_dict and store it
                    pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
                    jso["pdf_intermediate_dict"] = pdf_info_dict
@@ -269,7 +269,7 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            return jso
    # start of the main logic
    s3_pdf_path = jso.get("file_location")
@@ -295,8 +295,8 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> dict:
        svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
        max_svgs = max(svgs_per_page_list)
        if max_svgs > 3000:
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
        # elif total_page > 1000:
        #     jso['need_drop'] = True
        #     jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
@@ -323,10 +323,10 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> dict:
                    debug_mode=debug_mode,
                )
                if pdf_info_dict.get(
-                    "need_drop", False
+                    "_need_drop", False
                ):  # if the returned dict carries need_drop, extract drop_reason and skip this parse
-                    jso["need_drop"] = True
-                    jso["drop_reason"] = pdf_info_dict["drop_reason"]
+                    jso["_need_drop"] = True
+                    jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
                else:  # normal return: convert to train format, then compress and store pdf_info_dict
                    jso["parsed_results"] = convert_to_train_format(pdf_info_dict)
                    pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
...
@@ -17,7 +17,7 @@ def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
@@ -45,7 +45,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, mode, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
@@ -78,7 +78,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
@@ -108,7 +108,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
@@ -137,7 +137,7 @@ def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
@@ -165,7 +165,7 @@ def ocr_pdf_intermediate_dict_to_standard_format_with_para(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
@@ -221,7 +221,7 @@ def ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name, start_page_id=0, debug_mode=False):

# used specifically to re-run dropped pdfs; afterwards the need_drop field must be set back to false
def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
-    if not jso.get("need_drop", False):
+    if not jso.get("_need_drop", False):
        return jso
    else:
        try:
@@ -233,7 +233,7 @@ def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
            )
            jso["pdf_intermediate_dict"] = JsonCompressor.compress_json(pdf_info_dict)
            jso["parse_time"] = parse_time
-            jso["need_drop"] = False
+            jso["_need_drop"] = False
        except Exception as e:
            jso = exception_handler(jso, e)
        return jso
@@ -244,7 +244,7 @@ def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            return jso
    try:
        pdf_bytes = get_pdf_bytes(jso)
...
@@ -18,7 +18,7 @@ def txt_pdf_to_standard_format(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop")
            jso["dropped"] = True
@@ -46,7 +46,7 @@ def txt_pdf_to_mm_markdown_format(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the needdrop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop")
            jso["dropped"] = True
...
@@ -62,6 +62,6 @@ def pdf_post_filter(page_info) -> tuple:
    """
    bool_is_pseudo_single_column, extra_info = __is_pseudo_single_column(page_info)
    if bool_is_pseudo_single_column:
-        return False, {"need_drop": True, "drop_reason": DropReason.PSEUDO_SINGLE_COLUMN, "extra_info": extra_info}
+        return False, {"_need_drop": True, "_drop_reason": DropReason.PSEUDO_SINGLE_COLUMN, "extra_info": extra_info}

    return True, None
\ No newline at end of file
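
pdf_post_filter keeps its (bool, dict-or-None) return contract; only the keys inside the failure dict are renamed. A hedged caller sketch (the jso bookkeeping mirrors the pipeline code above; the variable names are illustrative):

    is_good, drop_info = pdf_post_filter(page_info)
    if not is_good:
        # propagate the underscore-prefixed trace keys as-is
        jso["_need_drop"] = True
        jso["_drop_reason"] = drop_info["_drop_reason"]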
@@ -68,7 +68,7 @@ def pdf_filter(page: fitz.Page, text_blocks, table_bboxes, image_bboxes) -> tuple:
    """
    if __is_contain_color_background_rect(page, text_blocks, image_bboxes):
-        return False, {"need_drop": True, "drop_reason": DropReason.COLOR_BACKGROUND_TEXT_BOX}
+        return False, {"_need_drop": True, "_drop_reason": DropReason.COLOR_BACKGROUND_TEXT_BOX}

    return True, None
\ No newline at end of file
+from loguru import logger
+
+from magic_pdf.dict2md.mkcontent import mk_universal_format
+from magic_pdf.dict2md.ocr_mkcontent import make_standard_format_with_para
+from magic_pdf.filter.pdf_classify_by_type import classify
+from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
+from magic_pdf.libs.drop_reason import DropReason
+from magic_pdf.libs.json_compressor import JsonCompressor
+from magic_pdf.spark.spark_api import parse_union_pdf, parse_ocr_pdf
+
+
+class UNIPipe:
+
+    def __init__(self):
+        pass
+
+    def classify(self, pdf_bytes: bytes) -> str:
+        """
+        Decide from the pdf metadata whether this is a text pdf or an ocr pdf.
+        """
+        pdf_meta = pdf_meta_scan(pdf_bytes)
+        if pdf_meta.get("_need_drop", False):  # if the scan flags the pdf for dropping, raise
+            raise Exception(f"pdf meta_scan need_drop, reason is {pdf_meta['_drop_reason']}")
+        else:
+            is_encrypted = pdf_meta["is_encrypted"]
+            is_needs_password = pdf_meta["is_needs_password"]
+            if is_encrypted or is_needs_password:  # encrypted and password-protected pdfs are not processed
+                raise Exception(f"pdf meta_scan need_drop, reason is {DropReason.ENCRYPTED}")
+            else:
+                is_text_pdf, results = classify(
+                    pdf_meta["total_page"],
+                    pdf_meta["page_width_pts"],
+                    pdf_meta["page_height_pts"],
+                    pdf_meta["image_info_per_page"],
+                    pdf_meta["text_len_per_page"],
+                    pdf_meta["imgs_per_page"],
+                    pdf_meta["text_layout_per_page"],
+                )
+                if is_text_pdf:
+                    allow_language = ["zh", "en"]  # allowed languages; currently only Simplified Chinese and English
+                    text_language = pdf_meta["text_language"]
+                    logger.info(f"pdf meta_scan text_language is {text_language}")
+                    if text_language not in allow_language:  # drop if the language is not among the allowed ones
+                        if text_language == "un":  # "un" (unknown) may be garbled Chinese, worth trying ocr
+                            return "ocr"
+                        else:
+                            raise Exception(f"pdf meta_scan need_drop, reason is {DropReason.NOT_ALLOW_LANGUAGE}")
+                    else:
+                        return "txt"
+                else:
+                    return "ocr"
+
+    def parse(self, pdf_bytes: bytes, image_writer, jso_useful_key) -> dict:
+        """
+        Parse the pdf according to its type.
+        """
+        if jso_useful_key['_pdf_type'] == "txt":
+            pdf_mid_data = parse_union_pdf(pdf_bytes, jso_useful_key['model_list'], image_writer)
+        elif jso_useful_key['_pdf_type'] == "ocr":
+            pdf_mid_data = parse_ocr_pdf(pdf_bytes, jso_useful_key['model_list'], image_writer)
+        else:
+            raise Exception(f"pdf type is not txt or ocr")
+        return JsonCompressor.compress(pdf_mid_data)
+
+    def mk_uni_format(self, pdf_mid_data: str, img_buket_path: str) -> list:
+        """
+        Build the universal-format content_list according to the parse type.
+        """
+        pdf_mid_data = JsonCompressor.decompress_json(pdf_mid_data)
+        parse_type = pdf_mid_data["_parse_type"]
+        if parse_type == "txt":
+            content_list = mk_universal_format(pdf_mid_data, img_buket_path)
+        elif parse_type == "ocr":
+            content_list = make_standard_format_with_para(pdf_mid_data, img_buket_path)
+        return content_list
+
+
+if __name__ == '__main__':
+    # test
+    pipe = UNIPipe()
+    pdf_bytes = open(r"D:\project\20231108code-clean\magic_pdf\tmp\unittest\download-pdfs\数学新星网\edu_00001544.pdf",
+                     "rb").read()
+    pdf_type = pipe.classify(pdf_bytes)
+    logger.info(f"pdf_type is {pdf_type}")
@@ -26,9 +26,9 @@ def get_bookid(jso: dict):

def exception_handler(jso: dict, e):
    logger.exception(e)
-    jso["need_drop"] = True
-    jso["drop_reason"] = DropReason.Exception
-    jso["exception"] = f"ERROR: {e}"
+    jso["_need_drop"] = True
+    jso["_drop_reason"] = DropReason.Exception
+    jso["_exception"] = f"ERROR: {e}"
    return jso
...
@@ -36,7 +36,7 @@ def parse_txt_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
    return pdf_info_dict


def parse_ocr_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
    """
    Parse an ocr-type pdf.
    """
@@ -48,12 +48,12 @@ def parse_ocr_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
        debug_mode=is_debug,
    )
-    pdf_info_dict["parse_type"] = "ocr"
+    pdf_info_dict["_parse_type"] = "ocr"
    return pdf_info_dict


def parse_union_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
    """
    Parse a pdf that mixes ocr and text content, extracting everything.
    """
@@ -72,18 +72,26 @@ def parse_union_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
    pdf_info_dict = parse_pdf(parse_pdf_by_txt)
-    if pdf_info_dict is None or pdf_info_dict.get("need_drop", False):
+    if pdf_info_dict is None or pdf_info_dict.get("_need_drop", False):
        logger.warning(f"parse_pdf_by_txt drop or error, switch to parse_pdf_by_ocr")
        pdf_info_dict = parse_pdf(parse_pdf_by_ocr)
        if pdf_info_dict is None:
            raise Exception("Both parse_pdf_by_txt and parse_pdf_by_ocr failed.")
        else:
-            pdf_info_dict["parse_type"] = "ocr"
+            pdf_info_dict["_parse_type"] = "ocr"
    else:
-        pdf_info_dict["parse_type"] = "txt"
+        pdf_info_dict["_parse_type"] = "txt"
    return pdf_info_dict


-def spark_json_extractor(jso:dict):
-    pass
+def spark_json_extractor(jso: dict) -> dict:
+    """
+    Extract the needed fields from the json and return them as a dict.
+    """
+    return {
+        "_pdf_type": jso["_pdf_type"],
+        "model_list": jso["doc_layout_result"],
+    }
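
The fleshed-out spark_json_extractor returns exactly the dict shape that UNIPipe.parse reads as jso_useful_key, so the two plug together directly. A hedged sketch (jso, pdf_bytes, and image_writer are assumed to come from the surrounding Spark pipeline):

    jso_useful_key = spark_json_extractor(jso)  # {"_pdf_type": ..., "model_list": ...}
    compressed_mid_data = UNIPipe().parse(pdf_bytes, image_writer, jso_useful_key)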