Commit c9af3457 authored by 赵小蒙

delete useless files

parent 89518eff
import json

from magic_pdf.libs.config_reader import get_s3_config_dict
from magic_pdf.libs.commons import join_path, read_file, json_dump_path

local_json_path = "Z:/format.json"
local_jsonl_path = "Z:/format.jsonl"


def get_json_from_local_or_s3(book_name=None):
    if book_name is None:
        with open(local_json_path, "r", encoding="utf-8") as json_file:
            json_line = json_file.read()
            json_object = json.loads(json_line)
    else:
        # error_log_path & json_dump_path
        # the source json can be fetched from either of the two locations above (configurable)
        json_path = join_path(json_dump_path, book_name + ".json")
        s3_config = get_s3_config_dict(json_path)
        file_content = read_file(json_path, s3_config)
        json_str = file_content.decode("utf-8")
        # logger.info(json_str)
        json_object = json.loads(json_str)
    return json_object


def write_json_to_local(jso, book_name=None):
    if book_name is None:
        with open(local_json_path, "w", encoding="utf-8") as file:
            file.write(json.dumps(jso, ensure_ascii=False))
    else:
        pass
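# Usage sketch for the two helpers above (an illustration added here, not part of the
# original file; "some_book" is a hypothetical name): with no book_name they fall back
# to the local Z:/format.json, otherwise the object is resolved under json_dump_path on S3.
#   jso = get_json_from_local_or_s3()             # read Z:/format.json
#   jso = get_json_from_local_or_s3("some_book")  # read <json_dump_path>/some_book.json from S3
#   write_json_to_local(jso)                      # dump back to Z:/format.json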
import json
import os

from tqdm import tqdm

from magic_pdf.libs.commons import join_path

with open('/mnt/petrelfs/share_data/ouyanglinke/OCR/OCR_validation_dataset.json', 'r') as f:
    samples = json.load(f)

pdf_model_dir = 's3://llm-pdf-text/eval_1k/layout_res/'

labels = []
det_res = []
edit_distance_list = []
for sample in tqdm(samples):
    pdf_name = sample['pdf_name']
    page_num = sample['page']
    pdf_model_path = join_path(pdf_model_dir, pdf_name)
    model_output_json = join_path(pdf_model_path, f"page_{page_num}.json")  # page numbers in the model output start from 1
    save_root_path = '/mnt/petrelfs/share_data/ouyanglinke/OCR/OCR_val_docxchain/'
    save_path = join_path(save_root_path, pdf_name)
    os.makedirs(save_path, exist_ok=True)
    # print("s3c cp {} {}".format(model_output_json, save_path))
    os.system("aws --profile langchao --endpoint-url=http://10.140.85.161:80 s3 cp {} {}".format(model_output_json, save_path))
import json
import os
import sys
import time

from loguru import logger
from pathlib import Path

from magic_pdf.libs.config_reader import get_s3_config_dict
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
from demo.demo_commons import get_json_from_local_or_s3
from magic_pdf.dict2md.ocr_mkcontent import (
    ocr_mk_mm_markdown_with_para,
    make_standard_format_with_para
)
from magic_pdf.libs.commons import join_path, read_file, formatted_time


def save_markdown(markdown_text, input_filepath):
    # directory of the input file
    directory = os.path.dirname(input_filepath)
    # name of the input file without its extension
    base_name = os.path.basename(input_filepath)
    file_name_without_ext = os.path.splitext(base_name)[0]
    # path of the output file
    output_filepath = os.path.join(directory, f"{file_name_without_ext}.md")
    # write the markdown text into the .md file
    with open(output_filepath, 'w', encoding='utf-8') as file:
        file.write(markdown_text)


def read_json_file(file_path):
    with open(file_path, 'r') as f:
        data = json.load(f)
    return data


def ocr_local_parse(ocr_pdf_path, ocr_json_file_path):
    try:
        ocr_pdf_model_info = read_json_file(ocr_json_file_path)
        pth = Path(ocr_json_file_path)
        book_name = pth.name
        pdf_bytes = read_file(ocr_pdf_path, None)
        ocr_parse_core(book_name, pdf_bytes, ocr_pdf_model_info)
    except Exception as e:
        logger.exception(e)


def ocr_online_parse(book_name, start_page_id=0, debug_mode=True):
    try:
        json_object = get_json_from_local_or_s3(book_name)
        # logger.info(json_object)
        s3_pdf_path = json_object["file_location"]
        s3_config = get_s3_config_dict(s3_pdf_path)
        pdf_bytes = read_file(s3_pdf_path, s3_config)
        ocr_pdf_model_info = json_object.get("doc_layout_result")
        ocr_parse_core(book_name, pdf_bytes, ocr_pdf_model_info)
    except Exception as e:
        logger.exception(e)


def ocr_parse_core(book_name, pdf_bytes, ocr_pdf_model_info, start_page_id=0):
    save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "tmp", "unittest")
    save_path = join_path(save_tmp_path, "md")
    save_path_with_bookname = os.path.join(save_path, book_name)
    text_content_save_path = f"{save_path_with_bookname}/book.md"

    pdf_info_dict, parse_time = ocr_parse_pdf_core(pdf_bytes, ocr_pdf_model_info, book_name, start_page_id=start_page_id, debug_mode=True)

    parent_dir = os.path.dirname(text_content_save_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    # markdown_content = mk_nlp_markdown(pdf_info_dict)
    markdown_content = ocr_mk_mm_markdown_with_para(pdf_info_dict)
    # markdown_pagination = ocr_mk_mm_markdown_with_para_and_pagination(pdf_info_dict)

    with open(text_content_save_path, "w", encoding="utf-8") as f:
        f.write(markdown_content)

    standard_format = make_standard_format_with_para(pdf_info_dict)
    standard_format_save_path = f"{save_path_with_bookname}/standard_format.txt"
    with open(standard_format_save_path, "w", encoding="utf-8") as f:
        # dump standard_format as json text and save it
        f.write(json.dumps(standard_format, ensure_ascii=False))


def ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name, start_page_id=0, debug_mode=False):
    start_time = time.time()  # record the start time
    # log book_name and the time parsing started
    logger.info(
        f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
        file=sys.stderr,
    )
    pdf_info_dict = parse_pdf_by_ocr(
        pdf_bytes,
        model_output_json_list,
        "",
        book_name,
        pdf_model_profile=None,
        start_page_id=start_page_id,
        debug_mode=debug_mode,
    )
    end_time = time.time()  # record the finish time
    parse_time = int(end_time - start_time)  # compute the elapsed time
    # after parsing, log book_name and the elapsed time
    logger.info(
        f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
        file=sys.stderr,
    )
    return pdf_info_dict, parse_time


if __name__ == '__main__':
    pdf_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.pdf"
    json_file_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.json"

    # ocr_local_parse(pdf_path, json_file_path)
    book_name = "数学新星网/edu_00001236"
    ocr_online_parse(book_name)

    pass
import json
import os
import sys
from pathlib import Path

import click
from loguru import logger

from magic_pdf.libs.commons import join_path, read_file
from magic_pdf.dict2md.mkcontent import mk_mm_markdown, mk_universal_format
from magic_pdf.pdf_parse_by_txt import parse_pdf_by_txt


def main(s3_pdf_path: str, s3_pdf_profile: str, pdf_model_path: str, pdf_model_profile: str, start_page_num=0, debug_mode=True):
    """ """
    pth = Path(s3_pdf_path)
    book_name = pth.name
    # book_name = "".join(os.path.basename(s3_pdf_path).split(".")[0:-1])
    save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "..", "tmp", "unittest")
    save_path = join_path(save_tmp_path, "md")
    text_content_save_path = f"{save_path}/{book_name}/book.md"
    # metadata_save_path = f"{save_path}/{book_name}/metadata.json"
    pdf_bytes = read_file(s3_pdf_path, s3_pdf_profile)
    try:
        paras_dict = parse_pdf_by_txt(
            pdf_bytes, pdf_model_path, save_path, book_name, pdf_model_profile, start_page_num, debug_mode=debug_mode
        )
        parent_dir = os.path.dirname(text_content_save_path)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
        if not paras_dict.get('need_drop'):
            content_list = mk_universal_format(paras_dict)
            markdown_content = mk_mm_markdown(content_list)
        else:
            markdown_content = paras_dict['drop_reason']
        with open(text_content_save_path, "w", encoding="utf-8") as f:
            f.write(markdown_content)
    except Exception as e:
        print(f"ERROR: {s3_pdf_path}, {e}", file=sys.stderr)
        logger.exception(e)


@click.command()
@click.option("--pdf-file-path", help="path of the pdf file on s3")
@click.option("--save-path", help="parent directory for the extracted images and text")
def main_shell(pdf_file_path: str, save_path: str):
    # pdf_bin_file_path = "s3://llm-raw-snew/llm-raw-scihub/scimag07865000-07865999/10.1007/"
    pdf_bin_file_parent_path = "s3://llm-raw-snew/llm-raw-scihub/"
    pdf_bin_file_profile = "s2"
    pdf_model_parent_dir = "s3://llm-pdf-text/eval_1k/layout_res/"
    pdf_model_profile = "langchao"

    p = Path(pdf_file_path)
    pdf_parent_path = p.parent
    pdf_file_name = p.name  # pdf file name, including the extension

    pdf_bin_file_path = join_path(pdf_bin_file_parent_path, pdf_parent_path)
    pdf_model_dir = join_path(pdf_model_parent_dir, pdf_parent_path)

    main(
        join_path(pdf_bin_file_path, pdf_file_name),
        pdf_bin_file_profile,
        join_path(pdf_model_dir, pdf_file_name),
        pdf_model_profile,
        save_path,
    )


@click.command()
@click.option("--pdf-dir", help="path of the local pdf files")
@click.option("--model-dir", help="path of the local model files")
@click.option("--start-page-num", default=0, help="page number to start parsing from")
def main_shell2(pdf_dir: str, model_dir: str, start_page_num: int):
    # first scan the pdf directory for all file names
    pdf_dir = Path(pdf_dir)
    model_dir = Path(model_dir)
    if pdf_dir.is_file():
        pdf_file_names = [pdf_dir.name]
        pdf_dir = pdf_dir.parent
    else:
        pdf_file_names = [f.name for f in pdf_dir.glob("*.pdf")]

    for pdf_file in pdf_file_names:
        pdf_file_path = os.path.join(pdf_dir, pdf_file)
        model_file_path = os.path.join(model_dir, pdf_file).rstrip(".pdf") + ".json"
        with open(model_file_path, "r") as json_file:
            model_list = json.load(json_file)
        main(pdf_file_path, None, model_list, None, start_page_num)


if __name__ == "__main__":
    main_shell2()
from pathlib import Path

import click
import json

from demo.pdf2md import main


@click.command()
@click.option("--pdf-file-path", help="path of the pdf file on s3")
@click.option("--pdf-name", help="pdf name")
def main_shell(pdf_file_path: str, pdf_name: str):
    with open('/mnt/petrelfs/share_data/ouyanglinke/OCR/OCR_validation_dataset_final_rotated_formulafix_highdpi_scihub.json', 'r') as f:
        samples = json.load(f)

    for sample in samples:
        pdf_file_path = sample['s3_path']
        pdf_bin_file_profile = "outsider"
        pdf_name = sample['pdf_name']
        pdf_model_dir = f"s3://llm-pdf-text/eval_1k/layout_res/{pdf_name}"
        pdf_model_profile = "langchao"

        p = Path(pdf_file_path)
        pdf_file_name = p.name  # pdf file name, including the extension
        # pdf_model_dir = join_path(pdf_model_parent_dir, pdf_file_name)

        main(
            pdf_file_path,
            pdf_bin_file_profile,
            pdf_model_dir,
            pdf_model_profile,
            debug_mode=True,
        )


if __name__ == "__main__":
    main_shell()
import json
import os
import sys
from pathlib import Path

import click
from loguru import logger

from demo.demo_commons import get_json_from_local_or_s3, write_json_to_local, local_jsonl_path, local_json_path
from magic_pdf.dict2md.mkcontent import mk_mm_markdown, mk_universal_format
from magic_pdf.filter.pdf_classify_by_type import classify
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
from magic_pdf.libs.commons import join_path, read_file
from magic_pdf.libs.config_reader import get_s3_config_dict
from magic_pdf.pdf_parse_by_txt import parse_pdf_by_txt
from magic_pdf.spark.spark_api import get_data_source


def demo_parse_pdf(book_name=None, start_page_id=0, debug_mode=True):
    json_object = get_json_from_local_or_s3(book_name)
    s3_pdf_path = json_object.get("file_location")
    s3_config = get_s3_config_dict(s3_pdf_path)
    pdf_bytes = read_file(s3_pdf_path, s3_config)
    model_output_json_list = json_object.get("doc_layout_result")
    data_source = get_data_source(json_object)
    file_id = json_object.get("file_id")
    junk_img_bojids = json_object["pdf_meta"]["junk_img_bojids"]

    save_path = ""
    pdf_info_dict = parse_pdf_by_txt(
        pdf_bytes,
        model_output_json_list,
        save_path,
        f"{data_source}/{file_id}",
        pdf_model_profile=None,
        start_page_id=start_page_id,
        junk_img_bojids=junk_img_bojids,
        debug_mode=debug_mode,
    )
    write_json_to_local(pdf_info_dict, book_name)
    content_list = mk_universal_format(pdf_info_dict)
    markdown_content = mk_mm_markdown(content_list)
    if book_name is not None:
        save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "tmp", "unittest", "md", book_name)
        uni_format_save_path = join_path(save_tmp_path, "book" + ".json")
        markdown_save_path = join_path(save_tmp_path, "book" + ".md")
        with open(uni_format_save_path, "w", encoding="utf-8") as f:
            f.write(json.dumps(content_list, ensure_ascii=False, indent=4))
        with open(markdown_save_path, "w", encoding="utf-8") as f:
            f.write(markdown_content)
    else:
        logger.info(json.dumps(content_list, ensure_ascii=False))


def demo_classify_by_type(book_name=None, debug_mode=True):
    json_object = get_json_from_local_or_s3(book_name)
    pdf_meta = json_object.get("pdf_meta")
    total_page = pdf_meta["total_page"]
    page_width = pdf_meta["page_width_pts"]
    page_height = pdf_meta["page_height_pts"]
    img_sz_list = pdf_meta["image_info_per_page"]
    img_num_list = pdf_meta["imgs_per_page"]
    text_len_list = pdf_meta["text_len_per_page"]
    text_layout_list = pdf_meta["text_layout_per_page"]
    is_text_pdf, results = classify(
        total_page,
        page_width,
        page_height,
        img_sz_list,
        text_len_list,
        img_num_list,
        text_layout_list,
    )
    logger.info(f"is_text_pdf: {is_text_pdf}")
    logger.info(json.dumps(results, ensure_ascii=False))
    write_json_to_local(results, book_name)


def demo_meta_scan(book_name=None, debug_mode=True):
    json_object = get_json_from_local_or_s3(book_name)
    s3_pdf_path = json_object.get("file_location")
    s3_config = get_s3_config_dict(s3_pdf_path)
    pdf_bytes = read_file(s3_pdf_path, s3_config)
    res = pdf_meta_scan(pdf_bytes)
    logger.info(json.dumps(res, ensure_ascii=False))
    write_json_to_local(res, book_name)


def demo_test5():
    with open(local_json_path, "r", encoding="utf-8") as json_file:
        json_line = json_file.read()
        jso = json.loads(json_line)
    img_list_len = len(jso["content"]["image_info_per_page"])
    logger.info(f"img_list_len: {img_list_len}")


def read_more_para_test_samples(type="scihub"):
    # read the multi-paragraph test samples
    curr_dir = Path(__file__).parent
    files_path = ""
    if type == "gift":
        relative_path = "../tests/assets/more_para_test_samples/gift_files.txt"
        files_path = os.path.join(curr_dir, relative_path)
    if type == "scihub":
        relative_path = "../tests/assets/more_para_test_samples/scihub_files.txt"
        files_path = os.path.join(curr_dir, relative_path)
    if type == "zlib":
        relative_path = "../tests/assets/more_para_test_samples/zlib_files.txt"
        files_path = os.path.join(curr_dir, relative_path)
    # check if the file exists
    if not os.path.exists(files_path):
        print("File not exist!")
        sys.exit(0)

    with open(files_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        # print("lines", lines)

    return lines


def batch_test_more_para(type="scihub"):
    # batch-test multi-paragraph splitting
    para_test_files = read_more_para_test_samples(type)
    for file in para_test_files:
        file = file.strip()
        print(file)
        demo_parse_pdf(book_name=file)


@click.command()
@click.option("--book-name", help="path of the pdf file on s3")
def main(book_name: str):
    demo_parse_pdf(book_name, start_page_id=0)


if __name__ == "__main__":
    main()
import time

from loguru import logger

from magic_pdf.libs.commons import (
    fitz,
    get_delta_time,
    get_docx_model_output,
)
from magic_pdf.libs.convert_utils import dict_to_list
from magic_pdf.libs.coordinate_transform import get_scale_ratio
from magic_pdf.libs.drop_tag import DropTag
from magic_pdf.libs.hash_utils import compute_md5
from magic_pdf.libs.ocr_content_type import ContentType
from magic_pdf.para.para_split import para_split
from magic_pdf.pre_proc.construct_page_dict import ocr_construct_page_component
from magic_pdf.pre_proc.detect_footer_by_model import parse_footers
from magic_pdf.pre_proc.detect_footnote import parse_footnotes_by_model
from magic_pdf.pre_proc.detect_header import parse_headers
from magic_pdf.pre_proc.detect_page_number import parse_pageNos
from magic_pdf.pre_proc.cut_image import ocr_cut_image_and_table
from magic_pdf.pre_proc.ocr_detect_layout import layout_detect
from magic_pdf.pre_proc.ocr_dict_merge import (
    merge_spans_to_line_by_layout, merge_lines_to_block,
)
from magic_pdf.pre_proc.ocr_span_list_modify import remove_spans_by_bboxes, remove_overlaps_min_spans, \
    adjust_bbox_for_standalone_block, modify_y_axis, modify_inline_equation, get_qa_need_list, \
    remove_spans_by_bboxes_dict
from magic_pdf.pre_proc.remove_bbox_overlap import remove_overlap_between_bbox_for_span


def parse_pdf_by_ocr(
    pdf_bytes,
    pdf_model_output,
    imageWriter,
    start_page_id=0,
    end_page_id=None,
    debug_mode=False,
):
    pdf_bytes_md5 = compute_md5(pdf_bytes)
    pdf_docs = fitz.open("pdf", pdf_bytes)

    # initialise an empty pdf_info_dict
    pdf_info_dict = {}
    start_time = time.time()

    end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
    for page_id in range(start_page_id, end_page_id + 1):
        # get the page object of the current page
        page = pdf_docs[page_id]
        # get the width and height of the current page
        page_w = page.rect.width
        page_h = page.rect.height

        if debug_mode:
            time_now = time.time()
            logger.info(
                f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}"
            )
            start_time = time_now

        # get the model output for the current page
        ocr_page_info = get_docx_model_output(
            pdf_model_output, page_id
        )

        """get each page's page-number, header and footer bboxes from the json"""
        page_no_bboxes = parse_pageNos(page_id, page, ocr_page_info)
        header_bboxes = parse_headers(page_id, page, ocr_page_info)
        footer_bboxes = parse_footers(page_id, page, ocr_page_info)
        footnote_bboxes = parse_footnotes_by_model(page_id, page, ocr_page_info, debug_mode=debug_mode)

        # build the dict of bboxes that need to be removed
        need_remove_spans_bboxes_dict = {
            DropTag.PAGE_NUMBER: page_no_bboxes,
            DropTag.HEADER: header_bboxes,
            DropTag.FOOTER: footer_bboxes,
            DropTag.FOOTNOTE: footnote_bboxes,
        }

        layout_dets = ocr_page_info["layout_dets"]
        spans = []

        # compute the scale ratio between model coordinates and pymu coordinates
        horizontal_scale_ratio, vertical_scale_ratio = get_scale_ratio(
            ocr_page_info, page
        )

        for layout_det in layout_dets:
            category_id = layout_det["category_id"]
            allow_category_id_list = [1, 7, 13, 14, 15]
            if category_id in allow_category_id_list:
                x0, y0, _, _, x1, y1, _, _ = layout_det["poly"]
                bbox = [
                    int(x0 / horizontal_scale_ratio),
                    int(y0 / vertical_scale_ratio),
                    int(x1 / horizontal_scale_ratio),
                    int(y1 / vertical_scale_ratio),
                ]
                # drop spans whose height or width is 0
                if bbox[2] - bbox[0] == 0 or bbox[3] - bbox[1] == 0:
                    continue
                """categories to be removed"""
                # 3: 'header',              # page header
                # 4: 'page number',         # page number
                # 5: 'footnote',            # footnote
                # 6: 'footer',              # page footer
                """categories concatenated as spans"""
                # 1: 'image',               # image
                # 7: 'table',               # table
                # 13: 'inline_equation',    # inline equation
                # 14: 'interline_equation', # interline (display) equation
                # 15: 'text',               # OCR-recognised text
                """layout information"""
                # 11: 'full column',        # single column
                # 12: 'sub column',         # multiple columns
                span = {
                    "bbox": bbox,
                }
                if category_id == 1:
                    span["type"] = ContentType.Image
                elif category_id == 7:
                    span["type"] = ContentType.Table
                elif category_id == 13:
                    span["content"] = layout_det["latex"]
                    span["type"] = ContentType.InlineEquation
                elif category_id == 14:
                    span["content"] = layout_det["latex"]
                    span["type"] = ContentType.InterlineEquation
                elif category_id == 15:
                    span["content"] = layout_det["text"]
                    span["type"] = ContentType.Text
                # print(span)
                spans.append(span)
            else:
                continue

        '''among overlapping spans, drop the smaller ones'''
        spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)

        '''
        remove the bboxes listed in remove_span_block_bboxes
        and record the drop-related data
        '''
        spans, dropped_spans_by_removed_bboxes = remove_spans_by_bboxes_dict(spans, need_remove_spans_bboxes_dict)

        '''crop images and tables'''
        spans = ocr_cut_image_and_table(spans, page, page_id, pdf_bytes_md5, imageWriter)

        '''adjust inline equations: align their height with the text on the same line (prefer the left side, then the right)'''
        displayed_list = []
        text_inline_lines = []
        modify_y_axis(spans, displayed_list, text_inline_lines)

        '''interline equations misrecognised by the model: convert their type to inline equation'''
        spans = modify_inline_equation(spans, displayed_list, text_inline_lines)

        '''remove bbox adhesion'''
        spans = remove_overlap_between_bbox_for_span(spans)

        '''
        extra handling for type in ["interline_equation", "image", "table"]:
        if there is text to the left, raise the span bbox's y0 so it is not above the text's y0
        '''
        spans = adjust_bbox_for_standalone_block(spans)

        '''parse the layout info from ocr_page_info (sorted in natural reading order, with overlapping and interleaved bad cases repaired)'''
        layout_bboxes, layout_tree = layout_detect(ocr_page_info['subfield_dets'], page, ocr_page_info)

        '''merge spans into lines (within a layout, top to bottom, left to right)'''
        lines, dropped_spans_by_layout = merge_spans_to_line_by_layout(spans, layout_bboxes)

        '''merge lines into blocks'''
        blocks = merge_lines_to_block(lines)

        '''collect the lists that QA needs to externalise'''
        images, tables, interline_equations, inline_equations = get_qa_need_list(blocks)

        '''merge the dropped span lists'''
        dropped_spans = []
        dropped_spans.extend(dropped_spans_by_span_overlap)
        dropped_spans.extend(dropped_spans_by_removed_bboxes)
        dropped_spans.extend(dropped_spans_by_layout)

        dropped_text_block = []
        dropped_image_block = []
        dropped_table_block = []
        dropped_equation_block = []
        for span in dropped_spans:
            # classify the dropped spans
            if span['type'] == ContentType.Text:
                dropped_text_block.append(span)
            elif span['type'] == ContentType.Image:
                dropped_image_block.append(span)
            elif span['type'] == ContentType.Table:
                dropped_table_block.append(span)
            elif span['type'] in [ContentType.InlineEquation, ContentType.InterlineEquation]:
                dropped_equation_block.append(span)

        '''build pdf_info_dict'''
        page_info = ocr_construct_page_component(blocks, layout_bboxes, page_id, page_w, page_h, layout_tree,
                                                 images, tables, interline_equations, inline_equations,
                                                 dropped_text_block, dropped_image_block, dropped_table_block,
                                                 dropped_equation_block,
                                                 need_remove_spans_bboxes_dict)
        pdf_info_dict[f"page_{page_id}"] = page_info

    """paragraph splitting"""
    para_split(pdf_info_dict, debug_mode=debug_mode)

    """dict to list"""
    pdf_info_list = dict_to_list(pdf_info_dict)
    new_pdf_info_dict = {
        "pdf_info": pdf_info_list,
    }

    return new_pdf_info_dict
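# Minimal call sketch for parse_pdf_by_ocr (an illustration added here, not part of the
# original file; "demo.pdf" and model_output_json are placeholders): the function takes
# raw pdf bytes plus the per-page layout model output and returns {"pdf_info": [...]}
# after span merging and paragraph splitting.
#   with open("demo.pdf", "rb") as f:
#       pdf_bytes = f.read()
#   result = parse_pdf_by_ocr(pdf_bytes, model_output_json, imageWriter="", debug_mode=True)
#   print(len(result["pdf_info"]))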
import time
# from anyio import Path
from magic_pdf.libs.commons import fitz, get_delta_time, get_img_s3_client, get_docx_model_output
import json
import os
import math
from loguru import logger
from magic_pdf.layout.bbox_sort import (
prepare_bboxes_for_layout_split,
)
from magic_pdf.layout.layout_sort import LAYOUT_UNPROC, get_bboxes_layout, get_columns_cnt_of_layout, sort_text_block
from magic_pdf.libs.convert_utils import dict_to_list
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.hash_utils import compute_md5
from magic_pdf.libs.markdown_utils import escape_special_markdown_char
from magic_pdf.libs.safe_filename import sanitize_filename
from magic_pdf.libs.vis_utils import draw_bbox_on_page, draw_layout_bbox_on_page
from magic_pdf.pre_proc.cut_image import txt_save_images_by_bboxes
from magic_pdf.pre_proc.detect_images import parse_images
from magic_pdf.pre_proc.detect_tables import parse_tables  # get the bboxes of tables
from magic_pdf.pre_proc.detect_equation import parse_equations  # get the bboxes of equations
from magic_pdf.pre_proc.detect_header import parse_headers  # get the bboxes of headers
from magic_pdf.pre_proc.detect_page_number import parse_pageNos  # get the bboxes of pageNos
from magic_pdf.pre_proc.detect_footnote import parse_footnotes_by_model, parse_footnotes_by_rule  # get the bboxes of footnotes
from magic_pdf.pre_proc.detect_footer_by_model import parse_footers  # get the bboxes of footers
from magic_pdf.post_proc.detect_para import (
ParaProcessPipeline,
TitleDetectionException,
TitleLevelException,
ParaSplitException,
ParaMergeException,
DenseSingleLineBlockException,
)
from magic_pdf.pre_proc.main_text_font import get_main_text_font
from magic_pdf.pre_proc.remove_colored_strip_bbox import remove_colored_strip_textblock
from magic_pdf.pre_proc.remove_footer_header import remove_headder_footer_one_page
'''
from para.para_pipeline import ParaProcessPipeline
from para.exceptions import (
TitleDetectionException,
TitleLevelException,
ParaSplitException,
ParaMergeException,
DenseSingleLineBlockException,
)
'''
from magic_pdf.post_proc.remove_footnote import merge_footnote_blocks, remove_footnote_blocks
from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
from magic_pdf.pre_proc.equations_replace import combine_chars_to_pymudict, remove_chars_in_text_blocks, replace_equations_in_textblock
from magic_pdf.pre_proc.pdf_pre_filter import pdf_filter
from magic_pdf.pre_proc.detect_footer_header_by_statistics import drop_footer_header
from magic_pdf.pre_proc.construct_page_dict import construct_page_component
from magic_pdf.pre_proc.fix_image import combine_images, fix_image_vertical, fix_seperated_image, include_img_title
from magic_pdf.post_proc.pdf_post_filter import pdf_post_filter
from magic_pdf.pre_proc.remove_rotate_bbox import get_side_boundry, remove_rotate_side_textblock, remove_side_blank_block
from magic_pdf.pre_proc.resolve_bbox_conflict import check_text_block_horizontal_overlap, resolve_bbox_overlap_conflict
from magic_pdf.pre_proc.fix_table import fix_table_text_block, fix_tables, include_table_title
from magic_pdf.pre_proc.solve_line_alien import solve_inline_too_large_interval
denseSingleLineBlockException_msg = DenseSingleLineBlockException().message
titleDetectionException_msg = TitleDetectionException().message
titleLevelException_msg = TitleLevelException().message
paraSplitException_msg = ParaSplitException().message
paraMergeException_msg = ParaMergeException().message
def parse_pdf_by_txt(
pdf_bytes,
pdf_model_output,
imageWriter,
start_page_id=0,
end_page_id=None,
debug_mode=False,
):
pdf_bytes_md5 = compute_md5(pdf_bytes)
pdf_docs = fitz.open("pdf", pdf_bytes)
pdf_info_dict = {}
start_time = time.time()
"""通过统计pdf全篇文字,识别正文字体"""
main_text_font = get_main_text_font(pdf_docs)
end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
for page_id in range(start_page_id, end_page_id + 1):
page = pdf_docs[page_id]
page_width = page.rect.width
page_height = page.rect.height
if debug_mode:
time_now = time.time()
logger.info(f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}")
start_time = time_now
"""
# 通过一个规则,过滤掉单页超过1500非junkimg的pdf
# 对单页面非重复id的img数量做统计,如果当前页超过1500则直接return need_drop
"""
page_imgs = page.get_images()
# remove the dependency on junk_img to simplify the logic
if len(page_imgs) > 1500:  # if the current page has more than 1500 images, skip it directly
logger.warning(f"page_id: {page_id}, img_counts: {len(page_imgs)}, drop this pdf")
result = {"_need_drop": True, "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}
if not debug_mode:
return result
"""
==================================================================================================================================
首先获取基本的block数据,对pdf进行分解,获取图片、表格、公式、text的bbox
"""
# 解析pdf原始文本block
text_raw_blocks = page.get_text(
"dict",
flags=fitz.TEXTFLAGS_TEXT,
)["blocks"]
model_output_json = get_docx_model_output(pdf_model_output, page_id)
# parse images
image_bboxes = parse_images(page_id, page, model_output_json)
image_bboxes = fix_image_vertical(image_bboxes, text_raw_blocks)  # fix the image positions
image_bboxes = fix_seperated_image(image_bboxes)  # merge images whose edges touch
image_bboxes = include_img_title(text_raw_blocks, image_bboxes)  # look above and below the image for a title, matched by rules; only English rules are supported for now
"""at this point image_bboxes may contain this case: two images side by side, each with its own sub-caption, and a larger caption (e.g. Figxxx) below both sub-captions; both image bboxes then contain that larger caption, and the images need to be merged"""
image_bboxes = combine_images(image_bboxes)  # merge images
# parse tables and fine-tune the table_bboxes so that text around the tables is not cut off
table_bboxes = parse_tables(page_id, page, model_output_json)
table_bboxes = fix_tables(page, table_bboxes, include_table_title=True, scan_line_num=2)  # fix
table_bboxes = fix_table_text_block(text_raw_blocks, table_bboxes)  # fix the relation to text blocks: some tables do not fully contain the text blocks pymupdf finds inside them, so a correction pass is needed
#debug_show_bbox(pdf_docs, page_id, table_bboxes, [], [b['bbox'] for b in text_raw_blocks], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
table_bboxes = include_table_title(text_raw_blocks, table_bboxes)  # look above and below the table for a title, matched by rules; only English rules are supported for now
# parse equations
equations_inline_bboxes, equations_interline_bboxes = parse_equations(page_id, page, model_output_json)
"""
==================================================================================================================================
进入预处理-1阶段
-------------------
# # 解析标题
# title_bboxs = parse_titles(page_id, page, model_output_json)
# # 评估Layout是否规整、简单
# isSimpleLayout_flag, fullColumn_cnt, subColumn_cnt, curPage_loss = evaluate_pdf_layout(page_id, page, model_output_json)
接下来开始进行预处理过程
"""
"""去掉每页的页码、页眉、页脚"""
page_no_bboxs = parse_pageNos(page_id, page, model_output_json)
header_bboxs = parse_headers(page_id, page, model_output_json)
footer_bboxs = parse_footers(page_id, page, model_output_json)
image_bboxes, table_bboxes, remain_text_blocks, removed_hdr_foot_txt_block, removed_hdr_foot_img_block, removed_hdr_foot_table = remove_headder_footer_one_page(text_raw_blocks, image_bboxes, table_bboxes, header_bboxs, footer_bboxs, page_no_bboxs, page_width, page_height)
"""去除页面上半部分长条色块内的文本块"""
remain_text_blocks, removed_colored_narrow_strip_background_text_block = remove_colored_strip_textblock(remain_text_blocks, page)
#debug_show_bbox(pdf_docs, page_id, footnote_bboxes_by_model, [b['bbox'] for b in remain_text_blocks], header_bboxs, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
"""去掉旋转的文字:水印、垂直排列的文字"""
remain_text_blocks, removed_non_horz_text_block = remove_rotate_side_textblock(
remain_text_blocks, page_width, page_height
) # 去掉水印,非水平文字
remain_text_blocks, removed_empty_side_block = remove_side_blank_block(remain_text_blocks, page_width, page_height) # 删除页面四周可能会留下的完全空白的textblock,这种block形成原因未知
"""出现在图片、表格上的文字块去掉,把层叠的图片单独分离出来,不参与layout的计算"""
(
image_bboxes,
table_bboxes,
equations_interline_bboxes,
equations_inline_bboxes,
remain_text_blocks,
text_block_on_image_removed,
images_overlap_backup,
interline_eq_temp_text_block
) = resolve_bbox_overlap_conflict(
image_bboxes, table_bboxes, equations_interline_bboxes, equations_inline_bboxes, remain_text_blocks
)
# """去掉footnote, 从文字和图片中"""
# # 通过模型识别到的footnote
# footnote_bboxes_by_model = parse_footnotes_by_model(page_id, page, model_output_json, md_bookname_save_path,
# debug_mode=debug_mode)
# # 通过规则识别到的footnote
# footnote_bboxes_by_rule = parse_footnotes_by_rule(remain_text_blocks, page_height, page_id)
"""
==================================================================================================================================
"""
# 把图、表、公式都进行截图,保存到存储上,返回图片路径作为内容
image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info = txt_save_images_by_bboxes(
page_id,
page,
pdf_bytes_md5,
image_bboxes,
images_overlap_backup,
table_bboxes,
equations_inline_bboxes,
equations_interline_bboxes,
imageWriter
)  # only keep the screenshots of tables and images
"""the equation-replacement stage starts below"""
char_level_text_blocks = page.get_text("rawdict", flags=fitz.TEXTFLAGS_TEXT)['blocks']
remain_text_blocks = combine_chars_to_pymudict(remain_text_blocks, char_level_text_blocks)  # merge chars
remain_text_blocks = replace_equations_in_textblock(remain_text_blocks, inline_eq_info, interline_eq_info)
remain_text_blocks = remove_citation_marker(remain_text_blocks)  # remove citation markers after equation replacement, so that equation replacement does not fail; the downside is that citation markers may then be treated as equations. Each order has pros and cons.
remain_text_blocks = remove_chars_in_text_blocks(remain_text_blocks)  # shrink the intermediate data
#debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in inline_eq_info], [b['bbox'] for b in interline_eq_info], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 3)
"""remove footnotes from the text and images (try removing citation markers first, then footnotes)"""
# footnotes recognised by the model
footnote_bboxes_by_model = parse_footnotes_by_model(page_id, page, model_output_json, debug_mode=debug_mode)
# footnotes recognised by rules
footnote_bboxes_by_rule = parse_footnotes_by_rule(remain_text_blocks, page_height, page_id, main_text_font)
"""enter the pdf filter and drop some unreasonable pdfs"""
is_good_pdf, err = pdf_filter(page, remain_text_blocks, table_bboxes, image_bboxes)
if not is_good_pdf:
logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {err}")
if not debug_mode:
return err
"""
==================================================================================================================================
进行版面布局切分和过滤
"""
"""在切分之前,先检查一下bbox是否有左右重叠的情况,如果有,那么就认为这个pdf暂时没有能力处理好,这种左右重叠的情况大概率是由于pdf里的行间公式、表格没有被正确识别出来造成的 """
is_text_block_horz_overlap = check_text_block_horizontal_overlap(remain_text_blocks, header_bboxs, footer_bboxs)
if is_text_block_horz_overlap:
# debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in remain_text_blocks], [], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 0)
logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}")
result = {"_need_drop": True, "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP}
if not debug_mode:
return result
"""统一格式化成一个数据结构用于计算layout"""
page_y0 = 0 if len(header_bboxs) == 0 else max([b[3] for b in header_bboxs])
page_y1 = page_height if len(footer_bboxs) == 0 else min([b[1] for b in footer_bboxs])
left_x, right_x = get_side_boundry(removed_non_horz_text_block, page_width, page_height)
page_boundry = [math.floor(left_x), page_y0 + 1, math.ceil(right_x), page_y1 - 1]
# returns an array whose elements are [x0, y0, x1, y1, block_content, idx_x, idx_y]; idx_x and idx_y are initially None. For images and equations block_content is the image path, for paragraphs it is the paragraph content
all_bboxes = prepare_bboxes_for_layout_split(
image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info, remain_text_blocks, page_boundry, page)
#debug_show_bbox(pdf_docs, page_id, [], [], all_bboxes, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 1)
"""page_y0, page_y1能够过滤掉页眉和页脚,不会算作layout内"""
layout_bboxes, layout_tree = get_bboxes_layout(all_bboxes, page_boundry, page_id)
if len(remain_text_blocks)>0 and len(all_bboxes)>0 and len(layout_bboxes)==0:
logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}")
result = {"_need_drop": True, "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}
if not debug_mode:
return result
"""以下去掉复杂的布局和超过2列的布局"""
if any([lay["layout_label"] == LAYOUT_UNPROC for lay in layout_bboxes]): # 复杂的布局
logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.COMPLICATED_LAYOUT}")
result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
if not debug_mode:
return result
layout_column_width = get_columns_cnt_of_layout(layout_tree)
if layout_column_width > 2:  # drop pdfs whose layout has more than 2 columns
logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}")
result = {
"_need_drop": True,
"_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
"extra_info": {"column_cnt": layout_column_width},
}
if not debug_mode:
return result
"""
==================================================================================================================================
构造出下游需要的数据结构
"""
remain_text_blocks = remain_text_blocks + interline_eq_temp_text_block # 把计算layout时候临时删除的行间公式再放回去,防止行间公式替换的时候丢失。
removed_text_blocks = []
removed_text_blocks.extend(removed_hdr_foot_txt_block)
# removed_text_blocks.extend(removed_footnote_text_block)
removed_text_blocks.extend(text_block_on_image_removed)
removed_text_blocks.extend(removed_non_horz_text_block)
removed_text_blocks.extend(removed_colored_narrow_strip_background_text_block)
removed_images = []
# removed_images.extend(footnote_imgs)
removed_images.extend(removed_hdr_foot_img_block)
images_backup = []
images_backup.extend(image_backup_info)
remain_text_blocks = escape_special_markdown_char(remain_text_blocks)  # escape the text inside spans
sorted_text_remain_text_block = sort_text_block(remain_text_blocks, layout_bboxes)
footnote_bboxes_tmp = []
footnote_bboxes_tmp.extend(footnote_bboxes_by_model)
footnote_bboxes_tmp.extend(footnote_bboxes_by_rule)
page_info = construct_page_component(
page_id,
image_info,
table_info,
sorted_text_remain_text_block,
layout_bboxes,
inline_eq_info,
interline_eq_info,
page.get_text("dict", flags=fitz.TEXTFLAGS_TEXT)["blocks"],
removed_text_blocks=removed_text_blocks,
removed_image_blocks=removed_images,
images_backup=images_backup,
droped_table_block=[],
table_backup=[],
layout_tree=layout_tree,
page_w=page.rect.width,
page_h=page.rect.height,
footnote_bboxes_tmp=footnote_bboxes_tmp
)
pdf_info_dict[f"page_{page_id}"] = page_info
# end page for
'''measure the time spent in the post-processing stage'''
start_time = time.time()
"""
==================================================================================================================================
remove headers and footers; this needs statistics over the whole document, so it is done last
headers and footers are removed mainly from the text boxes and image boxes located around the page edges.
the function below modifies pdf_info_dict in place, deleting header/footer content from the text blocks and images and recording what was deleted
"""
# remove headers and footers
header, footer = drop_footer_header(pdf_info_dict)
"""within a single layout, merge a footnote with all the text bboxes below it"""
for page_key, page_info in pdf_info_dict.items():
page_info = merge_footnote_blocks(page_info, main_text_font)
page_info = remove_footnote_blocks(page_info)
pdf_info_dict[page_key] = page_info
"""进入pdf后置过滤器,去掉一些不合理的pdf"""
i = 0
for page_info in pdf_info_dict.values():
is_good_pdf, err = pdf_post_filter(page_info)
if not is_good_pdf:
logger.warning(f"page_id: {i}, drop this pdf: {pdf_bytes_md5}, reason: {err}")
if not debug_mode:
return err
i += 1
if debug_mode:
# log the time spent in post-processing
logger.info(f"post_processing_time: {get_delta_time(start_time)}")
"""
==================================================================================================================================
enter paragraph processing stage 2
"""
# handle the case where the spacing between characters within a line is too large
pdf_info_dict = solve_inline_too_large_interval(pdf_info_dict)
start_time = time.time()
para_process_pipeline = ParaProcessPipeline()
def _deal_with_text_exception(error_info):
logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {error_info}")
if error_info == denseSingleLineBlockException_msg:
logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}")
result = {"_need_drop": True, "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK}
return result
if error_info == titleDetectionException_msg:
logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_DETECTION_FAILED}")
result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_DETECTION_FAILED}
return result
elif error_info == titleLevelException_msg:
logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_LEVEL_FAILED}")
result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
return result
elif error_info == paraSplitException_msg:
logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_SPLIT_FAILED}")
result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
return result
elif error_info == paraMergeException_msg:
logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_MERGE_FAILED}")
result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
return result
pdf_info_dict, error_info = para_process_pipeline.para_process_pipeline(pdf_info_dict)
if error_info is not None:
return _deal_with_text_exception(error_info)
"""dict转list"""
pdf_info_list = dict_to_list(pdf_info_dict)
new_pdf_info_dict = {
"pdf_info": pdf_info_list,
}
return new_pdf_info_dict
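# Result convention sketch (an illustration added here, summarising the returns above,
# not part of the original file): on success parse_pdf_by_txt returns {"pdf_info": [...]};
# on failure it returns a dict carrying "_need_drop"/"_drop_reason", which callers are
# expected to check before using the result.
#   result = parse_pdf_by_txt(pdf_bytes, model_output, image_writer)
#   if result.get("_need_drop"):
#       print("dropped:", result["_drop_reason"])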
# coding=utf8
import sys
import time
from urllib.parse import quote
from magic_pdf.libs.commons import (
read_file,
join_path,
parse_bucket_key,
formatted_time,
s3_image_save_path,
)
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.json_compressor import JsonCompressor
from magic_pdf.dict2md.mkcontent import mk_universal_format
from magic_pdf.pdf_parse_by_txt import parse_pdf_by_txt
from magic_pdf.filter.pdf_classify_by_type import classify
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
from loguru import logger
from magic_pdf.pdf_parse_for_train import parse_pdf_for_train
from magic_pdf.spark.base import exception_handler, get_data_source
from magic_pdf.train_utils.convert_to_train_format import convert_to_train_format
from magic_pdf.spark.s3 import get_s3_config
def meta_scan(jso: dict, doc_layout_check=True) -> dict:
s3_pdf_path = jso.get("file_location")
s3_config = get_s3_config(s3_pdf_path)
if doc_layout_check:
if (
"doc_layout_result" not in jso
):  # check whether the model data exists in the json; if not, this pdf has to be skipped
jso["_need_drop"] = True
jso["_drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
return jso
try:
data_source = get_data_source(jso)
file_id = jso.get("file_id")
book_name = f"{data_source}/{file_id}"
# the first page has an excessive-drawing problem
# special_pdf_list = ['zlib/zlib_21822650']
# if book_name in special_pdf_list:
# jso['need_drop'] = True
# jso['drop_reason'] = DropReason.SPECIAL_PDF
# return jso
start_time = time.time() # 记录开始时间
logger.info(
f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
file=sys.stderr,
)
file_content = read_file(s3_pdf_path, s3_config)
read_file_time = int(time.time() - start_time) # 计算执行时间
start_time = time.time() # 记录开始时间
res = pdf_meta_scan(s3_pdf_path, file_content)
if res.get(
"_need_drop", False
): # 如果返回的字典里有need_drop,则提取drop_reason并跳过本次解析
jso["_need_drop"] = True
jso["_drop_reason"] = res["_drop_reason"]
else: # 正常返回
jso["pdf_meta"] = res
jso["content"] = ""
jso["remark"] = ""
jso["data_url"] = ""
end_time = time.time() # 记录结束时间
meta_scan_time = int(end_time - start_time) # 计算执行时间
logger.info(
f"book_name is:{book_name},end_time is:{formatted_time(end_time)},read_file_time is:{read_file_time},meta_scan_time is:{meta_scan_time}",
file=sys.stderr,
)
jso["read_file_time"] = read_file_time
jso["meta_scan_time"] = meta_scan_time
except Exception as e:
jso = exception_handler(jso, e)
return jso
def classify_by_type(jso: dict, debug_mode=False) -> dict:
# 检测debug开关
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
return jso
# 开始正式逻辑
try:
pdf_meta = jso.get("pdf_meta")
data_source = get_data_source(jso)
file_id = jso.get("file_id")
book_name = f"{data_source}/{file_id}"
total_page = pdf_meta["total_page"]
page_width = pdf_meta["page_width_pts"]
page_height = pdf_meta["page_height_pts"]
img_sz_list = pdf_meta["image_info_per_page"]
img_num_list = pdf_meta["imgs_per_page"]
text_len_list = pdf_meta["text_len_per_page"]
text_layout_list = pdf_meta["text_layout_per_page"]
text_language = pdf_meta["text_language"]
# allow_language = ['zh', 'en'] # 允许的语言,目前只允许简中和英文的
# if text_language not in allow_language: # 如果语言不在允许的语言中,则drop
# jso['need_drop'] = True
# jso['drop_reason'] = DropReason.NOT_ALLOW_LANGUAGE
# return jso
pdf_path = pdf_meta["pdf_path"]
is_encrypted = pdf_meta["is_encrypted"]
is_needs_password = pdf_meta["is_needs_password"]
if (
is_encrypted or is_needs_password
): # 加密的,需要密码的,没有页面的,都不处理
jso["_need_drop"] = True
jso["_drop_reason"] = DropReason.ENCRYPTED
else:
start_time = time.time() # 记录开始时间
is_text_pdf, results = classify(
pdf_path,
total_page,
page_width,
page_height,
img_sz_list,
text_len_list,
img_num_list,
text_layout_list,
)
classify_time = int(time.time() - start_time) # 计算执行时间
if is_text_pdf:
pdf_meta["is_text_pdf"] = is_text_pdf
jso["_pdf_type"] = "TXT"
jso["pdf_meta"] = pdf_meta
jso["classify_time"] = classify_time
# print(json.dumps(pdf_meta, ensure_ascii=False))
allow_language = ["zh", "en"] # 允许的语言,目前只允许简中和英文的
if (
text_language not in allow_language
): # 如果语言不在允许的语言中,则drop
jso["_need_drop"] = True
jso["_drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
return jso
else:
# 先不drop
pdf_meta["is_text_pdf"] = is_text_pdf
jso["_pdf_type"] = "OCR"
jso["pdf_meta"] = pdf_meta
jso["classify_time"] = classify_time
# jso["_need_drop"] = True
# jso["_drop_reason"] = DropReason.NOT_IS_TEXT_PDF
extra_info = {"classify_rules": []}
for condition, result in results.items():
if not result:
extra_info["classify_rules"].append(condition)
jso["extra_info"] = extra_info
except Exception as e:
jso = exception_handler(jso, e)
return jso
def drop_needdrop_pdf(jso: dict) -> dict:
if jso.get("_need_drop", False):
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']} need drop",
file=sys.stderr,
)
jso["dropped"] = True
return jso
def pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
# markdown_content = mk_nlp_markdown(pdf_intermediate_dict)
jso["content_list"] = mk_universal_format(pdf_intermediate_dict)
# jso["content"] = markdown_content
logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']}")
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
# 检测debug开关
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
return jso
# 开始正式逻辑
s3_pdf_path = jso.get("file_location")
s3_config = get_s3_config(s3_pdf_path)
pdf_bytes = read_file(s3_pdf_path, s3_config)
model_output_json_list = jso.get("doc_layout_result")
data_source = get_data_source(jso)
file_id = jso.get("file_id")
book_name = f"{data_source}/{file_id}"
junk_img_bojids = jso["pdf_meta"]["junk_img_bojids"]
# add a check on max_svgs: if max_svgs exceeds 3000, drop the pdf
svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
max_svgs = max(svgs_per_page_list)
if max_svgs > 3000:
jso["_need_drop"] = True
jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
else:
try:
save_path = s3_image_save_path
image_s3_config = get_s3_config(save_path)
start_time = time.time() # 记录开始时间
# 先打印一下book_name和解析开始的时间
logger.info(
f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
file=sys.stderr,
)
pdf_info_dict = parse_pdf_by_txt(
pdf_bytes,
model_output_json_list,
save_path,
book_name,
pdf_model_profile=None,
image_s3_config=image_s3_config,
start_page_id=start_page_id,
junk_img_bojids=junk_img_bojids,
debug_mode=debug_mode,
)
if pdf_info_dict.get(
"_need_drop", False
): # 如果返回的字典里有need_drop,则提取drop_reason并跳过本次解析
jso["_need_drop"] = True
jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
else: # 正常返回,将 pdf_info_dict 压缩并存储
pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
jso["pdf_intermediate_dict"] = pdf_info_dict
end_time = time.time() # 记录完成时间
parse_time = int(end_time - start_time) # 计算执行时间
# 解析完成后打印一下book_name和耗时
logger.info(
f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
file=sys.stderr,
)
jso["parse_time"] = parse_time
except Exception as e:
jso = exception_handler(jso, e)
return jso
def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> dict:
# 检测debug开关
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
return jso
# 开始正式逻辑
s3_pdf_path = jso.get("file_location")
s3_config = get_s3_config(s3_pdf_path)
model_output_json_list = jso.get("doc_layout_result")
data_source = get_data_source(jso)
file_id = jso.get("file_id")
book_name = f"{data_source}/{file_id}"
# 1.23.22已修复
# if debug_mode:
# pass
# else:
# if book_name == "zlib/zlib_21929367":
# jso['need_drop'] = True
# jso['drop_reason'] = DropReason.SPECIAL_PDF
# return jso
junk_img_bojids = jso["pdf_meta"]["junk_img_bojids"]
# total_page = jso['pdf_meta']['total_page']
# add a check on max_svgs: if max_svgs exceeds 3000, drop the pdf
svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
max_svgs = max(svgs_per_page_list)
if max_svgs > 3000:
jso["_need_drop"] = True
jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
# elif total_page > 1000:
# jso['need_drop'] = True
# jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
else:
try:
save_path = s3_image_save_path
image_s3_config = get_s3_config(save_path)
start_time = time.time() # 记录开始时间
# 先打印一下book_name和解析开始的时间
logger.info(
f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
file=sys.stderr,
)
pdf_info_dict = parse_pdf_for_train(
s3_pdf_path,
s3_config,
model_output_json_list,
save_path,
book_name,
pdf_model_profile=None,
image_s3_config=image_s3_config,
start_page_id=start_page_id,
junk_img_bojids=junk_img_bojids,
debug_mode=debug_mode,
)
if pdf_info_dict.get(
"_need_drop", False
): # 如果返回的字典里有need_drop,则提取drop_reason并跳过本次解析
jso["_need_drop"] = True
jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
else: # 正常返回,将 pdf_info_dict 压缩并存储
jso["parsed_results"] = convert_to_train_format(pdf_info_dict)
pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
jso["pdf_intermediate_dict"] = pdf_info_dict
end_time = time.time() # 记录完成时间
parse_time = int(end_time - start_time) # 计算执行时间
# 解析完成后打印一下book_name和耗时
logger.info(
f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
file=sys.stderr,
)
jso["parse_time"] = parse_time
except Exception as e:
jso = exception_handler(jso, e)
return jso
"""
统一处理逻辑
1.先调用parse_pdf对文本类pdf进行处理
2.再调用ocr_dropped_parse_pdf,对之前drop的pdf进行处理
"""
# def uni_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
# jso = parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
# jso = ocr_dropped_parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
# return jso
if __name__ == "__main__":
pass
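# Stage chaining sketch (an illustration added here, not part of the original file):
# every stage above takes and returns the same jso dict, so stages compose directly.
#   jso = meta_scan(jso)
#   jso = classify_by_type(jso)
#   jso = parse_pdf(jso, start_page_id=0)
#   jso = pdf_intermediate_dict_to_markdown(jso)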
import sys
import time
from loguru import logger
from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_mm_markdown, ocr_mk_nlp_markdown_with_para, \
ocr_mk_mm_markdown_with_para_and_pagination, ocr_mk_mm_markdown_with_para, ocr_mk_mm_standard_format, \
make_standard_format_with_para
from magic_pdf.libs.commons import join_path, s3_image_save_path, formatted_time
from magic_pdf.libs.json_compressor import JsonCompressor
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
from magic_pdf.spark.base import get_data_source, exception_handler, get_pdf_bytes, get_bookname
from magic_pdf.spark.s3 import get_s3_config
def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
markdown_content = ocr_mk_mm_markdown(pdf_intermediate_dict)
jso["content"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, mode, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
if mode == "mm":
markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
elif mode == "nlp":
markdown_content = ocr_mk_nlp_markdown_with_para(pdf_intermediate_dict)
jso["content"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
markdown_content = ocr_mk_mm_markdown_with_para_and_pagination(pdf_intermediate_dict)
jso["content"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
# jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
# jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
jso: dict, debug_mode=False
) -> dict:
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
jso["content_ocr"] = markdown_content
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["mid_json_ocr"] = pdf_intermediate_dict
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
standard_format = ocr_mk_mm_standard_format(pdf_intermediate_dict)
jso["content_list"] = standard_format
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_pdf_intermediate_dict_to_standard_format_with_para(jso: dict, debug_mode=False) -> dict:
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
standard_format = make_standard_format_with_para(pdf_intermediate_dict)
jso["content_list"] = standard_format
logger.info(
f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
file=sys.stderr,
)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name, start_page_id=0, debug_mode=False):
save_path = s3_image_save_path
image_s3_config = get_s3_config(save_path)
start_time = time.time() # 记录开始时间
# 先打印一下book_name和解析开始的时间
logger.info(
f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
file=sys.stderr,
)
pdf_info_dict = parse_pdf_by_ocr(
pdf_bytes,
model_output_json_list,
save_path,
book_name,
pdf_model_profile=None,
image_s3_config=image_s3_config,
start_page_id=start_page_id,
debug_mode=debug_mode,
)
end_time = time.time() # 记录完成时间
parse_time = int(end_time - start_time) # 计算执行时间
# 解析完成后打印一下book_name和耗时
logger.info(
f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
file=sys.stderr,
)
return pdf_info_dict, parse_time
# used specifically to re-run pdfs that were dropped; after running, the need_drop field has to be set back to false
def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
if not jso.get("_need_drop", False):
return jso
else:
try:
pdf_bytes = get_pdf_bytes(jso)
model_output_json_list = jso.get("doc_layout_result")
book_name = get_bookname(jso)
pdf_info_dict, parse_time = ocr_parse_pdf_core(
pdf_bytes, model_output_json_list, book_name, start_page_id=start_page_id, debug_mode=debug_mode
)
jso["pdf_intermediate_dict"] = JsonCompressor.compress_json(pdf_info_dict)
jso["parse_time"] = parse_time
jso["_need_drop"] = False
except Exception as e:
jso = exception_handler(jso, e)
return jso
def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
# 检测debug开关
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
return jso
try:
pdf_bytes = get_pdf_bytes(jso)
model_output_json_list = jso.get("doc_layout_result")
book_name = get_bookname(jso)
pdf_info_dict, parse_time = ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name,
start_page_id=start_page_id, debug_mode=debug_mode)
jso["pdf_intermediate_dict"] = JsonCompressor.compress_json(pdf_info_dict)
jso["parse_time"] = parse_time
except Exception as e:
jso = exception_handler(jso, e)
return jso
"""
文本型pdf转化为统一清洗格式
"""
# TODO 移动到spark/目录下
from loguru import logger
from magic_pdf.dict2md.mkcontent import mk_mm_markdown, mk_universal_format
from magic_pdf.libs.commons import join_path
from magic_pdf.libs.json_compressor import JsonCompressor
from magic_pdf.spark.base import exception_handler, get_data_source
def txt_pdf_to_standard_format(jso: dict, debug_mode=False) -> dict:
"""
变成统一的标准格式
"""
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop")
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
standard_format = mk_universal_format(pdf_intermediate_dict)
jso["content_list"] = standard_format
logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",)
# 把无用的信息清空
jso["doc_layout_result"] = ""
jso["pdf_intermediate_dict"] = ""
jso["pdf_meta"] = ""
except Exception as e:
jso = exception_handler(jso, e)
return jso
def txt_pdf_to_mm_markdown_format(jso: dict, debug_mode=False) -> dict:
"""
变成多模态的markdown格式
"""
if debug_mode:
pass
else: # 如果debug没开,则检测是否有needdrop字段
if jso.get("_need_drop", False):
book_name = join_path(get_data_source(jso), jso["file_id"])
logger.info(f"book_name is:{book_name} need drop")
jso["dropped"] = True
return jso
try:
pdf_intermediate_dict = jso["pdf_intermediate_dict"]
# 将 pdf_intermediate_dict 解压
pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
standard_format = mk_universal_format(pdf_intermediate_dict)
mm_content = mk_mm_markdown(standard_format)
jso["content"] = mm_content
logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",)
# 把无用的信息清空
to_del_keys = ["doc_layout_result", "pdf_intermediate_dict", "pdf_meta", "parsed_result"]
for key in to_del_keys:
if jso.get(key):
del jso[key]
except Exception as e:
jso = exception_handler(jso, e)
return jso
from loguru import logger

from magic_pdf.libs.drop_reason import DropReason

import re

from magic_pdf.libs.config_reader import get_s3_config_dict

__re_s3_path = re.compile("^s3a?://([^/]+)(?:/(.*))?$")


def get_s3_config(path):
    bucket_name = split_s3_path(path)[0] if path else ""
    return get_s3_config_dict(bucket_name)


def split_s3_path(path: str):
    "split bucket and key from path"
    m = __re_s3_path.match(path)
    if m is None:
        return "", ""
    return m.group(1), (m.group(2) or "")
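# Behaviour sketch for the helpers above (an illustration added here, not part of the
# original file; the bucket/key values are made up):
#   split_s3_path("s3://some-bucket/some/prefix/file.json")  -> ("some-bucket", "some/prefix/file.json")
#   split_s3_path("not-an-s3-path")                          -> ("", "")
#   get_s3_config("s3://some-bucket/some/prefix/file.json")   # looks up the config for "some-bucket"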