Commit 051ee3c3 authored by 赵小蒙's avatar 赵小蒙

增加标准格式的拼装逻辑

parent a0135640
......@@ -4,7 +4,7 @@ import os
from loguru import logger
from pathlib import Path
from magic_pdf.dict2md.ocr_mkcontent import mk_mm_markdown2, mk_nlp_markdown, mk_mm_markdown
from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_mm_markdown_with_para, ocr_mk_nlp_markdown, ocr_mk_mm_markdown, ocr_mk_mm_standard_format
from magic_pdf.libs.commons import join_path
from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
......@@ -35,8 +35,8 @@ if __name__ == '__main__':
# ocr_pdf_path = r"D:\project\20231108code-clean\ocr\new\双栏\j.1540-627x.2006.00176.x.pdf"
# ocr_json_file_path = r"D:\project\20231108code-clean\ocr\new\双栏\j.1540-627x.2006.00176.x.json"
ocr_pdf_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.pdf"
ocr_json_file_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.json"
# ocr_pdf_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.pdf"
# ocr_json_file_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.json"
try:
ocr_pdf_model_info = read_json_file(ocr_json_file_path)
pth = Path(ocr_json_file_path)
......@@ -58,11 +58,16 @@ if __name__ == '__main__':
os.makedirs(parent_dir)
# markdown_content = mk_nlp_markdown(pdf_info_dict)
markdown_content = mk_mm_markdown2(pdf_info_dict)
markdown_content = ocr_mk_mm_markdown_with_para(pdf_info_dict)
with open(text_content_save_path, "w", encoding="utf-8") as f:
f.write(markdown_content)
standard_format = ocr_mk_mm_standard_format(pdf_info_dict)
standard_format_save_path = f"{save_path_with_bookname}/standard_format.txt"
with open(standard_format_save_path, "w", encoding="utf-8") as f:
f.write(str(standard_format))
# logger.info(markdown_content)
# save_markdown(markdown_text, ocr_json_file_path)
except Exception as e:
......
......@@ -28,7 +28,6 @@ def ocr_mk_nlp_markdown(pdf_info_dict: dict):
def ocr_mk_mm_markdown(pdf_info_dict: dict):
markdown = []
for _, page_info in pdf_info_dict.items():
......@@ -56,7 +55,7 @@ def ocr_mk_mm_markdown(pdf_info_dict: dict):
return '\n'.join(markdown)
def mk_mm_markdown2(pdf_info_dict:dict):
def ocr_mk_mm_markdown_with_para(pdf_info_dict: dict):
markdown = []
for _, page_info in pdf_info_dict.items():
paras = page_info.get("para_blocks")
......@@ -80,10 +79,64 @@ def mk_mm_markdown2(pdf_info_dict:dict):
return '\n\n'.join(markdown)
def ocr_mk_mm_standard_format():
def line_to_standard_format(line):
    """Convert one OCR line dict into a standard-format content entry.

    The first image/table span encountered turns the whole line into an
    image/table entry, and an interline equation likewise becomes a
    standalone equation entry (both return immediately). Otherwise, text
    spans and inline equations are merged into a single text entry.
    """
    text_parts = []
    inline_eq_count = 0
    for span in line['spans']:
        if not span.get('content'):
            # Span has no textual content: only useful if it carries an image.
            if not span.get('image_path'):
                continue
            if span['type'] == ContentType.Image:
                return {
                    'type': 'image',
                    'img_path': join_path(s3_image_save_path, span['image_path']),
                }
            if span['type'] == ContentType.Table:
                return {
                    'type': 'table',
                    'img_path': join_path(s3_image_save_path, span['image_path']),
                }
        else:
            if span['type'] == ContentType.InterlineEquation:
                # Escape markdown-special characters before wrapping in $$ .. $$.
                interline_equation = ocr_escape_special_markdown_char(span['content'])
                return {
                    'type': 'equation',
                    'latex': f"$$\n{interline_equation}\n$$",
                }
            if span['type'] == ContentType.InlineEquation:
                # Escape markdown-special characters, then embed as $..$.
                inline_equation = ocr_escape_special_markdown_char(span['content'])
                text_parts.append(f"${inline_equation}$")
                inline_eq_count += 1
            elif span['type'] == ContentType.Text:
                text_parts.append(span['content'])
    # No image/table/interline-equation span: emit the merged text entry
    # (possibly empty if the line had no usable spans).
    return {
        'type': 'text',
        'text': ''.join(text_parts),
        'inline_equation_num': inline_eq_count,
    }
def ocr_mk_mm_standard_format(pdf_info_dict: dict):
    """Assemble the parsed OCR result into the standard content list.

    Each entry of the returned list is a dict with fields:
        type       string  image / text / table / equation
                           (interline equations become standalone entries;
                           inline equations are merged into text)
        latex      string  LaTeX source, for equation entries
        text       string  plain-text content, for text entries
        img_path   string  e.g. s3://full/path/to/img.jpg, for image/table

    Args:
        pdf_info_dict: mapping of page id -> page info from the OCR parse
            stage; each page is expected to carry ``preproc_blocks``.

    Returns:
        list[dict]: one standard-format entry per line, in page order.
    """
    content_list = []
    for _, page_info in pdf_info_dict.items():
        blocks = page_info.get("preproc_blocks")
        if not blocks:  # a page may have no preprocessed blocks; skip it
            continue
        for block in blocks:
            for line in block['lines']:
                content_list.append(line_to_standard_format(line))
    return content_list
......@@ -3,7 +3,7 @@ import sys
import time
from urllib.parse import quote
from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_nlp_markdown, ocr_mk_mm_markdown
from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_nlp_markdown, ocr_mk_mm_markdown, ocr_mk_mm_standard_format
from magic_pdf.libs.commons import read_file, join_path, parse_bucket_key, formatted_time, s3_image_save_path
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.json_compressor import JsonCompressor
......@@ -387,5 +387,31 @@ def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
return jso
def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
    """Pipeline step: decompress the intermediate dict and attach the
    standard-format content list to the job object.

    Args:
        jso: job-state dict carrying a compressed ``pdf_intermediate_dict``.
        debug_mode: when False, jobs flagged ``need_drop`` are dropped
            without processing.

    Returns:
        dict: the same ``jso``; on success ``content_list`` is set and the
        bulky intermediate fields are cleared, on failure it is routed
        through ``exception_handler``.
    """
    # Unless debugging, honor the need_drop flag and bail out early.
    if not debug_mode and jso.get('need_drop', False):
        book_name = join_path(get_data_source(jso), jso['file_id'])
        # NOTE: the old print-style `file=sys.stderr` kwarg was dropped —
        # loguru treats extra kwargs as str.format arguments, so it was dead.
        logger.info(f"book_name is:{book_name} need drop")
        jso["dropped"] = True
        return jso
    try:
        # pdf_intermediate_dict is stored compressed; expand it first.
        pdf_intermediate_dict = JsonCompressor.decompress_json(jso['pdf_intermediate_dict'])
        standard_format = ocr_mk_mm_standard_format(pdf_intermediate_dict)
        jso["content_list"] = standard_format
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},"
            f"content_list length is {len(standard_format)}"
        )
        # Clear the bulky intermediate payloads that are no longer needed.
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso
if __name__ == "__main__":
    # Module is used as a library by the pipeline; no standalone entry point.
    pass
......@@ -11,5 +11,6 @@ pycld2>=0.41
regex>=2023.12.25
spacy>=3.7.4
termcolor>=2.4.0
scikit-learn>=1.4.1.post1
en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl
zh_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/zh_core_web_sm-3.7.0/zh_core_web_sm-3.7.0-py3-none-any.whl
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment