Commit fe58649b authored by liukaiwen

Merge branch 'master' of github.com:papayalove/Magic-PDF

parents d876cbe8 e9843e15
@@ -5,7 +5,13 @@ name: PDF
 on:
   push:
     branches:
-      - master
+      - "master"
+    paths-ignore:
+      - "cmds/**"
+      - "**.md"
+  pull_request:
+    branches:
+      - "master"
     paths-ignore:
       - "cmds/**"
       - "**.md"
@@ -34,16 +40,26 @@ jobs:
           pip install -r requirements.txt
         fi
-    - name: benchmark
+    - name: config-net-reset
+      run: |
+        export http_proxy=""
+        export https_proxy=""
+    - name: get-benchmark-result
       run: |
         echo "start test"
-        cd tools && python ocr_badcase.py pdf_json_label_0306.json ocr_dataset.json json_files.zip badcase.json overall.json base_data.json
+        cd tools && python text_badcase.py pdf_json_label_0306.json pdf_json_label_0229.json json_files.zip text_badcase text_overall base_data_text.json --s3_bucket_name llm-process-pperf --s3_file_directory qa-validate/pdf-datasets/badcase --AWS_ACCESS_KEY 7X9CWNHIVOHH3LXRD5WK --AWS_SECRET_KEY IHLyTsv7h4ArzReLWUGZNKvwqB7CMrRi6e7ZyUt0 --END_POINT_URL http://p-ceph-norm-inside.pjlab.org.cn:80
+        python ocr_badcase.py pdf_json_label_0306.json ocr_dataset.json json_files.zip ocr_badcase ocr_overall base_data_ocr.json --s3_bucket_name llm-process-pperf --s3_file_directory qa-validate/pdf-datasets/badcase --AWS_ACCESS_KEY 7X9CWNHIVOHH3LXRD5WK --AWS_SECRET_KEY IHLyTsv7h4ArzReLWUGZNKvwqB7CMrRi6e7ZyUt0 --END_POINT_URL http://p-ceph-norm-inside.pjlab.org.cn:80

   notify_to_feishu:
     if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure') && (github.ref_name == 'master') }}
     needs: [pdf-test]
-    runs-on: [pdf]
+    runs-on: pdf
     steps:
     - name: notify
       run: |
-        curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"'${{ github.repository }}' GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"},{"tag":"at","user_id":"'${{ secrets.USER_ID }}'"}]]}}}}' ${{ secrets.WEBHOOK_URL }}
+        curl ${{ secrets.WEBHOOK_URL }} -H 'Content-Type: application/json' -d '{
+          "msgtype": "text",
+          "text": {
+            "content": "'${{ github.repository }}' GitHubAction Failed!\n For details, see: https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"
+          }
+        }'
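For reference, after GitHub expands the ${{ ... }} expressions, the notify step posts a JSON body shaped roughly like this Python literal (the run id below is illustrative; the repository name comes from this merge):

    payload = {
        "msgtype": "text",
        "text": {
            "content": "papayalove/Magic-PDF GitHubAction Failed!\n For details, see: https://github.com/papayalove/Magic-PDF/actions/runs/123456789"
        }
    }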
@@ -115,8 +115,9 @@ def ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name, start_page_
 if __name__ == '__main__':
     pdf_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.pdf"
     json_file_path = r"/home/cxu/workspace/Magic-PDF/ocr_demo/j.1540-627x.2006.00176.x.json"
-    ocr_local_parse(pdf_path, json_file_path)
-    # ocr_online_parse(book_name)
+    # ocr_local_parse(pdf_path, json_file_path)
+    # book_name = "科数网/edu_00011318"
+    book_name = "数学新星网/edu_00001236"
+    ocr_online_parse(book_name)
     pass
@@ -67,9 +67,7 @@ def demo_classify_by_type(book_name=None, debug_mode=True):
     img_num_list = pdf_meta["imgs_per_page"]
     text_len_list = pdf_meta["text_len_per_page"]
     text_layout_list = pdf_meta["text_layout_per_page"]
-    pdf_path = json_object.get("file_location")
     is_text_pdf, results = classify(
-        pdf_path,
         total_page,
         page_width,
         page_height,
@@ -89,7 +87,7 @@ def demo_meta_scan(book_name=None, debug_mode=True):
     s3_pdf_path = json_object.get("file_location")
     s3_config = get_s3_config_dict(s3_pdf_path)
     pdf_bytes = read_file(s3_pdf_path, s3_config)
-    res = pdf_meta_scan(s3_pdf_path, pdf_bytes)
+    res = pdf_meta_scan(pdf_bytes)
     logger.info(json.dumps(res, ensure_ascii=False))
     write_json_to_local(res, book_name)
......
@@ -21,28 +21,122 @@ python magicpdf.py --json s3://llm-pdf-text/scihub/xxxx.json?bytes=0,81350
 python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf --model /home/llm/Downloads/xxxx.json or python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf
 """
+import click
+
+from magic_pdf.libs.config_reader import get_s3_config
+from magic_pdf.libs.path_utils import (
+    parse_s3path,
+    parse_s3_range_params,
+    remove_non_official_s3_args,
+)
+from magic_pdf.libs.config_reader import get_local_dir
+from magic_pdf.io.S3ReaderWriter import S3ReaderWriter, MODE_BIN
+from magic_pdf.io.DiskReaderWriter import DiskReaderWriter
+from magic_pdf.spark.spark_api import parse_union_pdf, parse_txt_pdf, parse_ocr_pdf
+import os
+import sys
+import json as json_parse
+from datetime import datetime
+
+parse_pdf_methods = click.Choice(["ocr", "txt", "auto"])
+
+
+def get_pdf_parse_method(method):
+    if method == "ocr":
+        return parse_ocr_pdf
+    elif method == "txt":
+        return parse_txt_pdf
+    return parse_union_pdf
+
+
+def prepare_env():
+    local_parent_dir = os.path.join(
+        get_local_dir(), "magic-pdf", datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+    )
+    local_image_dir = os.path.join(local_parent_dir, "images")
+    local_md_dir = os.path.join(local_parent_dir, "md")
+    os.makedirs(local_image_dir, exist_ok=True)
+    os.makedirs(local_md_dir, exist_ok=True)
+    return local_image_dir, local_md_dir
+

-import click
 @click.group()
 def cli():
     pass


 @cli.command()
-@click.option('--json', type=str, help='input an S3 path')
-def json_command(json):
-    # handle the json-related logic here
-    print(f'processing JSON: {json}')
+@click.option("--json", type=str, help="input an S3 path")
+@click.option(
+    "--method",
+    type=parse_pdf_methods,
+    help="specify the parse method. txt: parse text-based pdfs; ocr: parse pdfs with OCR; auto: choose the method automatically",
+    default="auto",
+)
+def json_command(json, method):
+    if not json.startswith("s3://"):
+        print("usage: python magicpdf.py --json s3://some_bucket/some_path")
+        sys.exit(1)
+
+    def read_s3_path(s3path):
+        bucket, key = parse_s3path(s3path)
+        s3_ak, s3_sk, s3_endpoint = get_s3_config(bucket)
+        s3_rw = S3ReaderWriter(
+            s3_ak, s3_sk, s3_endpoint, "auto", remove_non_official_s3_args(s3path)
+        )
+        may_range_params = parse_s3_range_params(json)
+        if may_range_params is None or 2 != len(may_range_params):
+            byte_start, byte_end = 0, None
+        else:
+            byte_start, byte_end = int(may_range_params[0]), int(may_range_params[1])
+        return s3_rw.read_jsonl(
+            remove_non_official_s3_args(s3path), byte_start, byte_end, MODE_BIN
+        )
+
+    jso = json_parse.loads(read_s3_path(json).decode("utf-8"))
+    pdf_data = read_s3_path(jso["file_location"])
+    local_image_dir, _ = prepare_env()
+    local_image_rw = DiskReaderWriter(local_image_dir)
+    parse = get_pdf_parse_method(method)
+    parse(pdf_data, jso["doc_layout_result"], local_image_rw, is_debug=True)


 @cli.command()
-@click.option('--pdf', type=click.Path(exists=True), required=True, help='path to the PDF file')
-@click.option('--model', type=click.Path(exists=True), help='path to the model output')
-def pdf_command(pdf, model):
+@click.option(
+    "--pdf", type=click.Path(exists=True), required=True, help="path to the PDF file"
+)
+@click.option("--model", type=click.Path(exists=True), help="path to the model output")
+@click.option(
+    "--method",
+    type=parse_pdf_methods,
+    help="specify the parse method. txt: parse text-based pdfs; ocr: parse pdfs with OCR; auto: choose the method automatically",
+    default="auto",
+)
+def pdf_command(pdf, model, method):
     # handle the pdf and model logic here
-    print(f'processing PDF: {pdf}')
-    print(f'loading model: {model}')
+    if model is None:
+        model = pdf.replace(".pdf", ".json")
+        if not os.path.exists(model):
+            print(f"make sure the json file exists and is placed under {os.path.dirname(pdf)}")
+            sys.exit(1)
+
+    def read_fn(path):
+        disk_rw = DiskReaderWriter(os.path.dirname(path))
+        return disk_rw.read(os.path.basename(path), MODE_BIN)
+
+    pdf_data = read_fn(pdf)
+    jso = json_parse.loads(read_fn(model).decode("utf-8"))
+    local_image_dir, _ = prepare_env()
+    local_image_rw = DiskReaderWriter(local_image_dir)
+    parse = get_pdf_parse_method(method)
+    parse(pdf_data, jso["doc_layout_result"], local_image_rw, is_debug=True)


-if __name__ == '__main__':
+if __name__ == "__main__":
+    """
+    python magic_pdf/cli/magicpdf.py json-command --json s3://llm-pdf-text/pdf_ebook_and_paper/format/v070/part-66028dd46437-000076.jsonl?bytes=0,308393
+    """
     cli()
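For reference, the new --method switch composes with the usage lines in the module docstring, e.g. (same illustrative paths as above):

    python magic_pdf/cli/magicpdf.py pdf-command --pdf /home/llm/Downloads/xxxx.pdf --method txt
    python magic_pdf/cli/magicpdf.py json-command --json s3://llm-pdf-text/scihub/xxxx.json?bytes=0,81350 --method auto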
@@ -2,6 +2,7 @@ import math
 from loguru import logger

 from magic_pdf.libs.boxbase import find_bottom_nearest_text_bbox, find_top_nearest_text_bbox
+from magic_pdf.libs.commons import join_path
 from magic_pdf.libs.ocr_content_type import ContentType

 TYPE_INLINE_EQUATION = ContentType.InlineEquation
@@ -227,7 +228,7 @@ def __insert_before_para(text, type, element, content_list):
         logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file, search target is {text}")


-def mk_universal_format(para_dict: dict):
+def mk_universal_format(para_dict: dict, img_buket_path):
     """
     Build the universal format. https://aicarrier.feishu.cn/wiki/FqmMwcH69iIdCWkkyjvcDwNUnTY
     """
@@ -249,7 +250,7 @@ def mk_universal_format(para_dict: dict):
         for img in all_page_images:
             content_node = {
                 "type": "image",
-                "img_path": img['image_path'],
+                "img_path": join_path(img_buket_path, img['image_path']),
                 "img_alt":"",
                 "img_title":"",
                 "img_caption":""
@@ -258,7 +259,7 @@ def mk_universal_format(para_dict: dict):
         for table in all_page_tables:
             content_node = {
                 "type": "table",
-                "img_path": table['image_path'],
+                "img_path": join_path(img_buket_path, table['image_path']),
                 "table_latex": table.get("text"),
                 "table_title": "",
                 "table_caption": "",
......
-from magic_pdf.libs.commons import s3_image_save_path, join_path
+from magic_pdf.libs.commons import join_path
 from magic_pdf.libs.language import detect_lang
 from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
 from magic_pdf.libs.ocr_content_type import ContentType
@@ -56,7 +56,7 @@ def ocr_mk_mm_markdown(pdf_info_dict: dict):
                     if not span.get('image_path'):
                         continue
                     else:
-                        content = f"![]({join_path(s3_image_save_path, span['image_path'])})"
+                        content = f"![]({span['image_path']})"
                 else:
                     content = ocr_escape_special_markdown_char(span['content'])  # escape special characters
                 if span['type'] == ContentType.InlineEquation:
@@ -123,7 +123,7 @@ def ocr_mk_markdown_with_para_core(paras_of_layout, mode):
                     content = f"\n$$\n{span['content']}\n$$\n"
                 elif span_type in [ContentType.Image, ContentType.Table]:
                     if mode == 'mm':
-                        content = f"\n![]({join_path(s3_image_save_path, span['image_path'])})\n"
+                        content = f"\n![]({span['image_path']})\n"
                     elif mode == 'nlp':
                         pass
                 if content != '':
@@ -138,10 +138,10 @@ def ocr_mk_markdown_with_para_core(paras_of_layout, mode):
     return page_markdown


-def para_to_standard_format(para):
+def para_to_standard_format(para, img_buket_path):
     para_content = {}
     if len(para) == 1:
-        para_content = line_to_standard_format(para[0])
+        para_content = line_to_standard_format(para[0], img_buket_path)
     elif len(para) > 1:
         para_text = ''
         inline_equation_num = 0
@@ -171,7 +171,7 @@ def para_to_standard_format(para):
         }
     return para_content


-def make_standard_format_with_para(pdf_info_dict: dict):
+def make_standard_format_with_para(pdf_info_dict: dict, img_buket_path: str):
     content_list = []
     for _, page_info in pdf_info_dict.items():
         paras_of_layout = page_info.get("para_blocks")
@@ -179,12 +179,12 @@ def make_standard_format_with_para(pdf_info_dict: dict):
             continue
         for paras in paras_of_layout:
             for para in paras:
-                para_content = para_to_standard_format(para)
+                para_content = para_to_standard_format(para, img_buket_path)
                 content_list.append(para_content)
     return content_list


-def line_to_standard_format(line):
+def line_to_standard_format(line, img_buket_path):
     line_text = ""
     inline_equation_num = 0
     for span in line['spans']:
@@ -195,13 +195,13 @@ def line_to_standard_format(line):
             if span['type'] == ContentType.Image:
                 content = {
                     'type': 'image',
-                    'img_path': join_path(s3_image_save_path, span['image_path'])
+                    'img_path': join_path(img_buket_path, span['image_path'])
                 }
                 return content
             elif span['type'] == ContentType.Table:
                 content = {
                     'type': 'table',
-                    'img_path': join_path(s3_image_save_path, span['image_path'])
+                    'img_path': join_path(img_buket_path, span['image_path'])
                 }
                 return content
             else:
......
@@ -15,6 +15,7 @@ from collections import Counter

 import click
 import numpy as np
+from loguru import logger

 from magic_pdf.libs.commons import mymax, get_top_percent_list
 from magic_pdf.filter.pdf_meta_scan import scan_max_page, junk_limit_min
@@ -298,7 +299,7 @@ def classify_by_img_narrow_strips(page_width, page_height, img_sz_list):
     return narrow_strip_pages_ratio < 0.5


-def classify(pdf_path, total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list, text_layout_list: list):
+def classify(total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list, text_layout_list: list):
     """
     Image and page sizes here are in pts.
     :param total_page:
@@ -323,7 +324,7 @@ def classify(pdf_path, total_page: int, page_width, page_height, img_sz_list: li
     elif not any(results.values()):
         return False, results
     else:
-        print(f"WARNING: {pdf_path} is not classified by area and text_len, by_image_area: {results['by_image_area']}, by_text: {results['by_text_len']}, by_avg_words: {results['by_avg_words']}, by_img_num: {results['by_img_num']}, by_text_layout: {results['by_text_layout']}, by_img_narrow_strips: {results['by_img_narrow_strips']}", file=sys.stderr)  # this case helps quickly spot unusual pdfs and tune the classifier for them
+        logger.warning(f"pdf is not classified by area and text_len, by_image_area: {results['by_image_area']}, by_text: {results['by_text_len']}, by_avg_words: {results['by_avg_words']}, by_img_num: {results['by_img_num']}, by_text_layout: {results['by_text_layout']}, by_img_narrow_strips: {results['by_img_narrow_strips']}", file=sys.stderr)  # this case helps quickly spot unusual pdfs and tune the classifier for them
         return False, results
@@ -350,7 +351,7 @@ def main(json_file):
             is_needs_password = o['is_needs_password']
             if is_encrypted or total_page == 0 or is_needs_password:  # skip encrypted, password-protected, and zero-page pdfs
                 continue
-            tag = classify(pdf_path, total_page, page_width, page_height, img_sz_list, text_len_list, text_layout_list)
+            tag = classify(total_page, page_width, page_height, img_sz_list, text_len_list, text_layout_list)
             o['is_text_pdf'] = tag
             print(json.dumps(o, ensure_ascii=False))
         except Exception as e:
......
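A minimal sketch of calling the reworked classify() without the dropped pdf_path argument; the per-page values below are illustrative, and the element shapes of img_sz_list and text_layout_list follow whatever pdf_meta_scan emits:

    total_page = 10
    is_text_pdf, results = classify(
        total_page,
        612, 792,                           # page width/height in pts
        [[] for _ in range(total_page)],    # img_sz_list: no images (illustrative)
        [1200] * total_page,                # text_len_list
        [0] * total_page,                   # img_num_list
        [],                                 # text_layout_list, as emitted by pdf_meta_scan
    )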
@@ -287,7 +287,7 @@ def get_language(doc: fitz.Document):
     return language


-def pdf_meta_scan(s3_pdf_path: str, pdf_bytes: bytes):
+def pdf_meta_scan(pdf_bytes: bytes):
     """
     :param s3_pdf_path:
     :param pdf_bytes: binary data of the pdf file
@@ -298,8 +298,8 @@ def pdf_meta_scan(s3_pdf_path: str, pdf_bytes: bytes):
     is_encrypted = doc.is_encrypted
     total_page = len(doc)
     if total_page == 0:
-        logger.warning(f"drop this pdf: {s3_pdf_path}, drop_reason: {DropReason.EMPTY_PDF}")
-        result = {"need_drop": True, "drop_reason": DropReason.EMPTY_PDF}
+        logger.warning(f"drop this pdf, drop_reason: {DropReason.EMPTY_PDF}")
+        result = {"_need_drop": True, "_drop_reason": DropReason.EMPTY_PDF}
         return result
     else:
         page_width_pts, page_height_pts = get_pdf_page_size_pts(doc)
@@ -322,7 +322,6 @@ def pdf_meta_scan(s3_pdf_path: str, pdf_bytes: bytes):
         # finally emit one json record
         res = {
-            "pdf_path": s3_pdf_path,
             "is_needs_password": is_needs_password,
             "is_encrypted": is_encrypted,
             "total_page": total_page,
@@ -350,7 +349,7 @@ def main(s3_pdf_path: str, s3_profile: str):
     """
     try:
         file_content = read_file(s3_pdf_path, s3_profile)
-        pdf_meta_scan(s3_pdf_path, file_content)
+        pdf_meta_scan(file_content)
     except Exception as e:
         print(f"ERROR: {s3_pdf_path}, {e}", file=sys.stderr)
         logger.exception(e)
......
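With the path argument gone, a caller only needs the bytes; a small sketch (the local path is hypothetical, and the result keys are the ones visible in this hunk):

    with open("/tmp/demo.pdf", "rb") as f:
        meta = pdf_meta_scan(f.read())
    print(meta["total_page"], meta["is_encrypted"], meta["is_needs_password"])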
@@ -5,9 +5,11 @@ from loguru import logger
 MODE_TXT = "text"
 MODE_BIN = "binary"


 class DiskReaderWriter(AbsReaderWriter):
-    def __init__(self, parent_path, encoding='utf-8'):
+    def __init__(self, parent_path, encoding="utf-8"):
         self.path = parent_path
         self.encoding = encoding
@@ -20,10 +22,10 @@ class DiskReaderWriter(AbsReaderWriter):
             logger.error(f"file {abspath} does not exist")
             raise Exception(f"file {abspath} does not exist")
         if mode == MODE_TXT:
-            with open(abspath, 'r', encoding = self.encoding) as f:
+            with open(abspath, "r", encoding=self.encoding) as f:
                 return f.read()
         elif mode == MODE_BIN:
-            with open(abspath, 'rb') as f:
+            with open(abspath, "rb") as f:
                 return f.read()
         else:
             raise ValueError("Invalid mode. Use 'text' or 'binary'.")
@@ -37,20 +39,21 @@ class DiskReaderWriter(AbsReaderWriter):
             directory_path = os.path.dirname(abspath)
             os.makedirs(directory_path)
         if mode == MODE_TXT:
-            with open(abspath, 'w', encoding=self.encoding) as f:
+            with open(abspath, "w", encoding=self.encoding) as f:
                 f.write(content)
                 logger.info(f"content successfully written to {abspath}")
         elif mode == MODE_BIN:
-            with open(abspath, 'wb') as f:
+            with open(abspath, "wb") as f:
                 f.write(content)
                 logger.info(f"content successfully written to {abspath}")
         else:
             raise ValueError("Invalid mode. Use 'text' or 'binary'.")

-    def read_jsonl(self, path: str, byte_start=0, byte_end=None, encoding='utf-8'):
+    def read_jsonl(self, path: str, byte_start=0, byte_end=None, encoding="utf-8"):
         return self.read(path)


 # usage example
 if __name__ == "__main__":
     file_path = "io/test/example.txt"
@@ -63,5 +66,3 @@ if __name__ == "__main__":
     content = drw.read(path=file_path)
     if content:
         logger.info(f"content read from {file_path}: {content}")
@@ -24,7 +24,7 @@ error_log_path = "s3://llm-pdf-text/err_logs/"
 # json_dump_path = "s3://pdf_books_temp/json_dump/"  # this path is only for temporary local testing and must not be committed to main
 json_dump_path = "s3://llm-pdf-text/json_dump/"

-s3_image_save_path = "s3://mllm-raw-media/pdf2md_img/"  # TODO the base library should not hard-code paths like this; they belong in business code
+# s3_image_save_path = "s3://mllm-raw-media/pdf2md_img/"  # the base library should not hard-code paths like this; they belong in business code


 def get_top_percent_list(num_list, percent):
@@ -120,28 +120,8 @@ def read_file(pdf_path: str, s3_profile):
         return f.read()


-def get_docx_model_output(pdf_model_output, pdf_model_s3_profile, page_id):
-    if isinstance(pdf_model_output, str):
-        model_output_json_path = join_path(pdf_model_output, f"page_{page_id + 1}.json")  # page numbers in the model output start from 1
-        if os.path.exists(model_output_json_path):
-            json_from_docx = read_file(model_output_json_path, pdf_model_s3_profile)
-            model_output_json = json.loads(json_from_docx)
-        else:
-            try:
-                model_output_json_path = join_path(pdf_model_output, "model.json")
-                with open(model_output_json_path, "r", encoding="utf-8") as f:
-                    model_output_json = json.load(f)
-                model_output_json = model_output_json["doc_layout_result"][page_id]
-            except:
-                s3_model_output_json_path = join_path(pdf_model_output, f"page_{page_id + 1}.json")
-                s3_model_output_json_path = join_path(pdf_model_output, f"{page_id}.json")
-                # s3_model_output_json_path = join_path(pdf_model_output, f"page_{page_id }.json")
-                # logger.warning(f"model_output_json_path: {model_output_json_path} not found. try to load from s3: {s3_model_output_json_path}")
-                s = read_file(s3_model_output_json_path, pdf_model_s3_profile)
-                return json.loads(s)
-    elif isinstance(pdf_model_output, list):
-        model_output_json = pdf_model_output[page_id]
+def get_docx_model_output(pdf_model_output, page_id):
+    model_output_json = pdf_model_output[page_id]
     return model_output_json
......
@@ -2,6 +2,7 @@
 Return the s3 AK, SK, endpoint triple for a given bucket name.
 """
+
 import json
 import os
@@ -10,11 +11,7 @@ from loguru import logger
 from magic_pdf.libs.commons import parse_bucket_key


-def get_s3_config(bucket_name: str):
-    """
-    Read from ~/magic-pdf.json
-    """
+def read_config():
     home_dir = os.path.expanduser("~")
     config_file = os.path.join(home_dir, "magic-pdf.json")
@@ -24,6 +21,14 @@ def get_s3_config(bucket_name: str):
     with open(config_file, "r") as f:
         config = json.load(f)
+        return config
+
+
+def get_s3_config(bucket_name: str):
+    """
+    Read from ~/magic-pdf.json
+    """
+    config = read_config()

     bucket_info = config.get("bucket_info")
     if bucket_name not in bucket_info:
@@ -49,5 +54,10 @@ def get_bucket_name(path):
     return bucket


-if __name__ == '__main__':
+def get_local_dir():
+    config = read_config()
+    return config.get("temp-output-dir", "/tmp")
+
+
+if __name__ == "__main__":
     ak, sk, endpoint = get_s3_config("llm-raw")
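A sketch of the ~/magic-pdf.json these accessors imply; only the key names bucket_info and temp-output-dir are visible here, and the AK/SK/endpoint triple layout inside bucket_info is an assumption:

    config = {
        "bucket_info": {
            "llm-raw": ["<ak>", "<sk>", "<endpoint>"],  # assumed triple layout
        },
        "temp-output-dir": "/tmp",
    }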
from collections import Counter

from magic_pdf.libs.language import detect_lang


def get_language_from_model(model_list: list):
    language_lst = []
    for ocr_page_info in model_list:
        page_text = ""
        layout_dets = ocr_page_info["layout_dets"]
        for layout_det in layout_dets:
            category_id = layout_det["category_id"]
            allow_category_id_list = [15]
            if category_id in allow_category_id_list:
                page_text += layout_det["text"]
        page_language = detect_lang(page_text)
        language_lst.append(page_language)
    # count how many times each language appears in language_lst
    count_dict = Counter(language_lst)
    # return the language that appears most often in language_lst
    language = max(count_dict, key=count_dict.get)
    return language
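A toy input for the new helper, assuming category_id 15 is the OCR text category implied by the allow list above:

    model_list = [{"layout_dets": [{"category_id": 15, "text": "Hello world"}]}]
    print(get_language_from_model(model_list))  # e.g. "en", per detect_lang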
@@ -8,7 +8,7 @@ class DropReason:
     HIGH_COMPUTATIONAL_lOAD_BY_SVGS = "high_computational_load_by_svgs"  # special SVG pages are too expensive to compute, so the pdf is dropped
     HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES = "high_computational_load_by_total_pages"  # the computation exceeds capacity under the current method
     MISS_DOC_LAYOUT_RESULT = "missing doc_layout_result"  # layout analysis failed
-    Exception = "exception"  # an exception occurred during parsing
+    Exception = "_exception"  # an exception occurred during parsing
     ENCRYPTED = "encrypted"  # the PDF is encrypted
     EMPTY_PDF = "total_page=0"  # the PDF has zero pages
     NOT_IS_TEXT_PDF = "not_is_text_pdf"  # not a text-based PDF, so it cannot be parsed directly
......
import hashlib


def compute_md5(file_bytes):
    hasher = hashlib.md5()
    hasher.update(file_bytes)
    return hasher.hexdigest().upper()


def compute_sha256(input_string):
    hasher = hashlib.sha256()
    # in Python 3, a string must be encoded to bytes before it can be hashed
    input_bytes = input_string.encode('utf-8')
    hasher.update(input_bytes)
    return hasher.hexdigest()
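Both helpers are deterministic, so they can be sanity-checked against well-known digests:

    assert compute_md5(b"hello") == "5D41402ABC4B2A76B9719D911017C592"
    assert compute_sha256("hello") == "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"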
from s3pathlib import S3Path


def remove_non_official_s3_args(s3path):
    """
    example: s3://abc/xxxx.json?bytes=0,81350 ==> s3://abc/xxxx.json
    """
    arr = s3path.split("?")
    return arr[0]


def parse_s3path(s3path: str):
    p = S3Path(remove_non_official_s3_args(s3path))
    return p.bucket, p.key


def parse_s3_range_params(s3path: str):
    """
    example: s3://abc/xxxx.json?bytes=0,81350 ==> [0, 81350]
    """
    arr = s3path.split("?bytes=")
    if len(arr) == 1:
        return None
    return arr[1].split(",")
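Behaviour sketch based on the docstrings above; note the range values come back as strings, which is why the CLI casts them with int():

    remove_non_official_s3_args("s3://abc/xxxx.json?bytes=0,81350")  # -> "s3://abc/xxxx.json"
    parse_s3_range_params("s3://abc/xxxx.json?bytes=0,81350")        # -> ["0", "81350"]
    parse_s3path("s3://abc/xxxx.json")                               # -> ("abc", "xxxx.json")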
-import os
-from pathlib import Path
-from typing import Tuple
-import io
-
-# from app.common.s3 import get_s3_client
 from magic_pdf.libs.commons import fitz
 from loguru import logger
-from magic_pdf.libs.commons import parse_bucket_key, join_path
+from magic_pdf.libs.commons import join_path
+from magic_pdf.libs.hash_utils import compute_sha256


-def cut_image(bbox: Tuple, page_num: int, page: fitz.Page, save_parent_path: str, s3_return_path=None, img_s3_client=None, upload_switch=True):
+def cut_image(bbox: tuple, page_num: int, page: fitz.Page, return_path, imageWriter):
     """
     Crop a jpg image out of page page_num according to bbox and return the image path.
     save_path: must support both s3 and local; the image is stored under save_path with filename {page_num}_{bbox[0]}_{bbox[1]}_{bbox[2]}_{bbox[3]}.jpg, bbox values truncated to integers.
     """
     # build the filename
-    filename = f"{page_num}_{int(bbox[0])}_{int(bbox[1])}_{int(bbox[2])}_{int(bbox[3])}.jpg"
-    # build the path
-    image_save_path = join_path(save_parent_path, filename)
-    s3_img_path = join_path(s3_return_path, filename) if s3_return_path is not None else None
-    # print the image filename
-    # print(f"Saved {image_save_path}")
-    # check the coordinates
-    # x_check = int(bbox[2]) - int(bbox[0])
-    # y_check = int(bbox[3]) - int(bbox[1])
-    # if x_check <= 0 or y_check <= 0:
-    #
-    #     if image_save_path.startswith("s3://"):
-    #         logger.exception(f"bad image coordinates passed in, x1<x0 or y1<y0, {s3_img_path}")
-    #         return s3_img_path
-    #     else:
-    #         logger.exception(f"bad image coordinates passed in, x1<x0 or y1<y0, {image_save_path}")
-    #         return image_save_path
+    filename = f"{page_num}_{int(bbox[0])}_{int(bbox[1])}_{int(bbox[2])}_{int(bbox[3])}"
+    # the old version returned a path without the bucket
+    img_path = join_path(return_path, filename) if return_path is not None else None
+    # the new version generates a flat, hash-based path
+    img_hash256_path = f"{compute_sha256(img_path)}.jpg"

     # convert the coordinates into a fitz.Rect object
     rect = fitz.Rect(*bbox)
@@ -42,39 +26,17 @@ def cut_image(bbox: Tuple, page_num: int, page: fitz.Page, save_parent_path: str
     # crop the image
     pix = page.get_pixmap(clip=rect, matrix=zoom)

-    if image_save_path.startswith("s3://"):
-        if not upload_switch:
-            pass
-        else:
-            # save the image to s3
-            bucket_name, bucket_key = parse_bucket_key(image_save_path)
-            # upload the byte stream to s3
-            byte_data = pix.tobytes(output='jpeg', jpg_quality=95)
-            file_obj = io.BytesIO(byte_data)
-            if img_s3_client is not None:
-                img_s3_client.upload_fileobj(file_obj, bucket_name, bucket_key)
-                # create a new client for every upload task
-                # img_s3_client_once = get_s3_client(image_save_path)
-                # img_s3_client_once.upload_fileobj(file_obj, bucket_name, bucket_key)
-            else:
-                logger.exception("must input img_s3_client")
-        return s3_img_path
-    else:
-        # save the image locally
-        # first check whether the parent directory of image_save_path exists, and create it if not
-        parent_dir = os.path.dirname(image_save_path)
-        if not os.path.exists(parent_dir):
-            os.makedirs(parent_dir)
-        pix.save(image_save_path, jpg_quality=95)
-        # so the image can be viewed directly in markdown, rewrite the path relative to the markdown file
-        pth = Path(image_save_path)
-        image_save_path = f"{pth.parent.name}/{pth.name}"
-        return image_save_path
-
-
-def save_images_by_bboxes(book_name: str, page_num: int, page: fitz.Page, save_path: str,
-                          image_bboxes: list, images_overlap_backup: list, table_bboxes: list, equation_inline_bboxes: list,
-                          equation_interline_bboxes: list, img_s3_client) -> dict:
+    byte_data = pix.tobytes(output='jpeg', jpg_quality=95)
+
+    imageWriter.write(data=byte_data, path=img_hash256_path, mode="binary")
+
+    return img_hash256_path
+
+
+def save_images_by_bboxes(page_num: int, page: fitz.Page, pdf_bytes_md5: str,
+                          image_bboxes: list, images_overlap_backup: list, table_bboxes: list,
+                          equation_inline_bboxes: list,
+                          equation_interline_bboxes: list, imageWriter) -> dict:
     """
     Return a dict keyed by bbox whose values are image paths.
     """
@@ -85,53 +47,30 @@ def save_images_by_bboxes(book_name: str, page_num: int, page: fitz.Page, save_p
     interline_eq_info = []

     # image save paths are composed as: {s3_or_local_path}/{book_name}/{images|tables|equations}/{page_num}_{bbox[0]}_{bbox[1]}_{bbox[2]}_{bbox[3]}.jpg
-    s3_return_image_path = join_path(book_name, "images")
-    image_save_path = join_path(save_path, s3_return_image_path)
-    s3_return_table_path = join_path(book_name, "tables")
-    table_save_path = join_path(save_path, s3_return_table_path)
-    s3_return_equations_inline_path = join_path(book_name, "equations_inline")
-    equation_inline_save_path = join_path(save_path, s3_return_equations_inline_path)
-    s3_return_equation_interline_path = join_path(book_name, "equation_interline")
-    equation_interline_save_path = join_path(save_path, s3_return_equation_interline_path)
+    def return_path(type):
+        return join_path(pdf_bytes_md5, type)

     for bbox in image_bboxes:
-        if any([bbox[0]>=bbox[2], bbox[1]>=bbox[3]]):
+        if any([bbox[0] >= bbox[2], bbox[1] >= bbox[3]]):
             logger.warning(f"image_bboxes: bad box, {bbox}")
             continue
-        image_path = cut_image(bbox, page_num, page, image_save_path, s3_return_image_path, img_s3_client)
+        image_path = cut_image(bbox, page_num, page, return_path("images"), imageWriter)
         image_info.append({"bbox": bbox, "image_path": image_path})

     for bbox in images_overlap_backup:
-        if any([bbox[0]>=bbox[2], bbox[1]>=bbox[3]]):
+        if any([bbox[0] >= bbox[2], bbox[1] >= bbox[3]]):
             logger.warning(f"images_overlap_backup: bad box, {bbox}")
             continue
-        image_path = cut_image(bbox, page_num, page, image_save_path, s3_return_image_path, img_s3_client)
+        image_path = cut_image(bbox, page_num, page, return_path("images"), imageWriter)
         image_backup_info.append({"bbox": bbox, "image_path": image_path})

     for bbox in table_bboxes:
-        if any([bbox[0]>=bbox[2], bbox[1]>=bbox[3]]):
+        if any([bbox[0] >= bbox[2], bbox[1] >= bbox[3]]):
             logger.warning(f"table_bboxes: bad box, {bbox}")
             continue
-        image_path = cut_image(bbox, page_num, page, table_save_path, s3_return_table_path, img_s3_client)
+        image_path = cut_image(bbox, page_num, page, return_path("tables"), imageWriter)
         table_info.append({"bbox": bbox, "image_path": image_path})

-    for bbox in equation_inline_bboxes:
-        if any([bbox[0]>=bbox[2], bbox[1]>=bbox[3]]):
-            logger.warning(f"equation_inline_bboxes: bad box, {bbox}")
-            continue
-        image_path = cut_image(bbox[:4], page_num, page, equation_inline_save_path, s3_return_equations_inline_path, img_s3_client, upload_switch=False)
-        inline_eq_info.append({'bbox': bbox[:4], "image_path": image_path, "latex_text": bbox[4]})
-
-    for bbox in equation_interline_bboxes:
-        if any([bbox[0]>=bbox[2], bbox[1]>=bbox[3]]):
-            logger.warning(f"equation_interline_bboxes: bad box, {bbox}")
-            continue
-        image_path = cut_image(bbox[:4], page_num, page, equation_interline_save_path, s3_return_equation_interline_path, img_s3_client, upload_switch=False)
-        interline_eq_info.append({"bbox": bbox[:4], "image_path": image_path, "latex_text": bbox[4]})
-
     return image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info
\ No newline at end of file
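The net effect on image naming, sketched with the helpers above (digest abbreviated; join_path joining with "/" is an assumption):

    pdf_bytes_md5 = compute_md5(pdf_bytes)                          # e.g. "5D41402A..."
    img_path = join_path(pdf_bytes_md5, "images/0_10_20_110_220")   # old-style relative path
    img_hash256_path = compute_sha256(img_path) + ".jpg"            # new flat, hash-based name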
-import json
-import os
 import time

 from loguru import logger

-from magic_pdf.libs.draw_bbox import draw_layout_bbox, draw_text_bbox
 from magic_pdf.libs.commons import (
-    read_file,
-    join_path,
     fitz,
-    get_img_s3_client,
     get_delta_time,
     get_docx_model_output,
 )
 from magic_pdf.libs.coordinate_transform import get_scale_ratio
 from magic_pdf.libs.drop_tag import DropTag
+from magic_pdf.libs.hash_utils import compute_md5
 from magic_pdf.libs.ocr_content_type import ContentType
-from magic_pdf.libs.safe_filename import sanitize_filename
 from magic_pdf.para.para_split import para_split
 from magic_pdf.pre_proc.construct_page_dict import ocr_construct_page_component
 from magic_pdf.pre_proc.detect_footer_by_model import parse_footers
@@ -38,38 +30,16 @@ from magic_pdf.pre_proc.remove_bbox_overlap import remove_overlap_between_bbox
 def parse_pdf_by_ocr(
     pdf_bytes,
     pdf_model_output,
-    save_path,
-    book_name,
-    pdf_model_profile=None,
-    image_s3_config=None,
+    imageWriter,
     start_page_id=0,
     end_page_id=None,
     debug_mode=False,
 ):
+    pdf_bytes_md5 = compute_md5(pdf_bytes)
-    save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "tmp", "unittest")
-    book_name = sanitize_filename(book_name)
-    md_bookname_save_path = ""
-    if debug_mode:
-        save_path = join_path(save_tmp_path, "md")
-        pdf_local_path = join_path(save_tmp_path, "download-pdfs", book_name)
-
-        if not os.path.exists(os.path.dirname(pdf_local_path)):
-            # create the directory if it does not exist
-            os.makedirs(os.path.dirname(pdf_local_path))
-
-        md_bookname_save_path = join_path(save_tmp_path, "md", book_name)
-        if not os.path.exists(md_bookname_save_path):
-            # create the directory if it does not exist
-            os.makedirs(md_bookname_save_path)
-
-        with open(pdf_local_path + ".pdf", "wb") as pdf_file:
-            pdf_file.write(pdf_bytes)
-
     pdf_docs = fitz.open("pdf", pdf_bytes)
     # initialize an empty pdf_info_dict
     pdf_info_dict = {}
-    img_s3_client = get_img_s3_client(save_path, image_s3_config)

     start_time = time.time()
@@ -91,16 +61,14 @@ def parse_pdf_by_ocr(
         # get the model data for the current page
         ocr_page_info = get_docx_model_output(
-            pdf_model_output, pdf_model_profile, page_id
+            pdf_model_output, page_id
         )

         """get the page-number, header and footer bboxes of each page from the json"""
         page_no_bboxes = parse_pageNos(page_id, page, ocr_page_info)
         header_bboxes = parse_headers(page_id, page, ocr_page_info)
         footer_bboxes = parse_footers(page_id, page, ocr_page_info)
-        footnote_bboxes = parse_footnotes_by_model(
-            page_id, page, ocr_page_info, md_bookname_save_path, debug_mode=debug_mode
-        )
+        footnote_bboxes = parse_footnotes_by_model(page_id, page, ocr_page_info, debug_mode=debug_mode)

         # build the dict of bboxes that need to be removed
         need_remove_spans_bboxes_dict = {
@@ -179,7 +147,7 @@ def parse_pdf_by_ocr(
             spans, dropped_spans_by_removed_bboxes = remove_spans_by_bboxes_dict(spans, need_remove_spans_bboxes_dict)

             '''crop images and tables'''
-            spans = cut_image_and_table(spans, page, page_id, book_name, save_path, img_s3_client)
+            spans = cut_image_and_table(spans, page, page_id, pdf_bytes_md5, imageWriter)

             '''adjust inline equations: make their height match the text on the same line (prefer left, then right)'''
             displayed_list = []
@@ -242,16 +210,4 @@ def parse_pdf_by_ocr(
     """paragraph splitting"""
     para_split(pdf_info_dict, debug_mode=debug_mode)

-    '''save debug info when testing'''
-    if debug_mode:
-        params_file_save_path = join_path(
-            save_tmp_path, "md", book_name, "preproc_out.json"
-        )
-        with open(params_file_save_path, "w", encoding="utf-8") as f:
-            json.dump(pdf_info_dict, f, ensure_ascii=False, indent=4)
-
-        # draw_bbox
-        draw_layout_bbox(pdf_info_dict, pdf_bytes, md_bookname_save_path)
-        draw_text_bbox(pdf_info_dict, pdf_bytes, md_bookname_save_path)
-
     return pdf_info_dict
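A call sketch for the slimmed-down signature; the model output and writer below are illustrative, with imageWriter being any writer exposing write(data, path, mode), e.g. the DiskReaderWriter wired up by the CLI:

    local_image_rw = DiskReaderWriter(local_image_dir)  # from prepare_env()
    pdf_info_dict = parse_pdf_by_ocr(
        pdf_bytes,
        jso["doc_layout_result"],  # per-page model output, as in the CLI
        local_image_rw,
        debug_mode=True,
    )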
This diff is collapsed.
@@ -112,7 +112,6 @@ def parse_pdf_for_train(
     pdf_model_output,
     save_path,
     book_name,
-    pdf_model_profile=None,
     image_s3_config=None,
     start_page_id=0,
     end_page_id=None,
@@ -184,8 +183,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, img_counts: {img_counts}, drop this pdf: {book_name}, drop_reason: {DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS,
+                "_need_drop": True,
+                "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS,
             }
             if not debug_mode:
                 return result
@@ -200,7 +199,7 @@ def parse_pdf_for_train(
             flags=fitz.TEXTFLAGS_TEXT,
         )["blocks"]
         model_output_json = get_docx_model_output(
-            pdf_model_output, pdf_model_profile, page_id
+            pdf_model_output, page_id
         )

         # parse images
@@ -397,8 +396,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP,
+                "_need_drop": True,
+                "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP,
             }
             if not debug_mode:
                 return result
@@ -444,8 +443,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT,
+                "_need_drop": True,
+                "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT,
             }
             if not debug_mode:
                 return result
@@ -457,7 +456,7 @@ def parse_pdf_for_train(
             logger.warning(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.COMPLICATED_LAYOUT}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.COMPLICATED_LAYOUT}
+            result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
             if not debug_mode:
                 return result
@@ -467,8 +466,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
+                "_need_drop": True,
+                "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
                 "extra_info": {"column_cnt": layout_column_width},
             }
             if not debug_mode:
@@ -617,8 +616,8 @@ def parse_pdf_for_train(
                 f"Drop this pdf: {book_name}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK,
+                "_need_drop": True,
+                "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK,
             }
             return result
         if error_info == titleDetectionException_msg:
@@ -626,27 +625,27 @@ def parse_pdf_for_train(
                 f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_DETECTION_FAILED}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.TITLE_DETECTION_FAILED,
+                "_need_drop": True,
+                "_drop_reason": DropReason.TITLE_DETECTION_FAILED,
             }
             return result
         elif error_info == titleLevelException_msg:
             logger.warning(
                 f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_LEVEL_FAILED}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.TITLE_LEVEL_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
             return result
         elif error_info == paraSplitException_msg:
             logger.warning(
                 f"Drop this pdf: {book_name}, reason: {DropReason.PARA_SPLIT_FAILED}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.PARA_SPLIT_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
             return result
         elif error_info == paraMergeException_msg:
             logger.warning(
                 f"Drop this pdf: {book_name}, reason: {DropReason.PARA_MERGE_FAILED}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.PARA_MERGE_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
             return result

     if debug_mode:
......
@@ -32,8 +32,8 @@ def meta_scan(jso: dict, doc_layout_check=True) -> dict:
         if (
             "doc_layout_result" not in jso
         ):  # check whether the json contains model data; skip this pdf if not
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
             return jso
     try:
         data_source = get_data_source(jso)
@@ -58,10 +58,10 @@ def meta_scan(jso: dict, doc_layout_check=True) -> dict:
         start_time = time.time()  # record the start time
         res = pdf_meta_scan(s3_pdf_path, file_content)
         if res.get(
-            "need_drop", False
+            "_need_drop", False
         ):  # if the returned dict contains need_drop, extract drop_reason and skip this parse
-            jso["need_drop"] = True
-            jso["drop_reason"] = res["drop_reason"]
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = res["_drop_reason"]
         else:  # normal return
             jso["pdf_meta"] = res
             jso["content"] = ""
@@ -85,7 +85,7 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug is off, check for the needdrop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     # start the real logic
     try:
@@ -113,8 +113,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
         if (
             is_encrypted or is_needs_password
         ):  # encrypted, password-protected or zero-page pdfs are not processed
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.ENCRYPTED
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.ENCRYPTED
         else:
             start_time = time.time()  # record the start time
             is_text_pdf, results = classify(
@@ -139,8 +139,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
                 if (
                     text_language not in allow_language
                 ):  # drop if the language is not in the allowed list
-                    jso["need_drop"] = True
-                    jso["drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
+                    jso["_need_drop"] = True
+                    jso["_drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
                     return jso
             else:
                 # don't drop for now
@@ -148,8 +148,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
                 jso["_pdf_type"] = "OCR"
                 jso["pdf_meta"] = pdf_meta
                 jso["classify_time"] = classify_time
-                # jso["need_drop"] = True
-                # jso["drop_reason"] = DropReason.NOT_IS_TEXT_PDF
+                # jso["_need_drop"] = True
+                # jso["_drop_reason"] = DropReason.NOT_IS_TEXT_PDF
                 extra_info = {"classify_rules": []}
                 for condition, result in results.items():
                     if not result:
@@ -162,7 +162,7 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:

 def drop_needdrop_pdf(jso: dict) -> dict:
-    if jso.get("need_drop", False):
+    if jso.get("_need_drop", False):
         logger.info(
             f"book_name is:{get_data_source(jso)}/{jso['file_id']} need drop",
             file=sys.stderr,
@@ -176,7 +176,7 @@ def pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug is off, check for the needdrop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -203,7 +203,7 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug is off, check for the needdrop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     # start the real logic
     s3_pdf_path = jso.get("file_location")
@@ -220,8 +220,8 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
         svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
         max_svgs = max(svgs_per_page_list)
         if max_svgs > 3000:
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
         else:
             try:
                 save_path = s3_image_save_path
@@ -244,10 +244,10 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
                     debug_mode=debug_mode,
                 )
                 if pdf_info_dict.get(
-                    "need_drop", False
+                    "_need_drop", False
                 ):  # if the returned dict contains need_drop, extract drop_reason and skip this parse
-                    jso["need_drop"] = True
-                    jso["drop_reason"] = pdf_info_dict["drop_reason"]
+                    jso["_need_drop"] = True
+                    jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
                 else:  # normal return: compress and store pdf_info_dict
                     pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
                     jso["pdf_intermediate_dict"] = pdf_info_dict
@@ -269,7 +269,7 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
     if debug_mode:
         pass
     else:  # if debug is off, check for the needdrop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     # start the real logic
     s3_pdf_path = jso.get("file_location")
@@ -295,8 +295,8 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
         svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
         max_svgs = max(svgs_per_page_list)
         if max_svgs > 3000:
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
         # elif total_page > 1000:
         #     jso['need_drop'] = True
         #     jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
@@ -323,10 +323,10 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
                     debug_mode=debug_mode,
                 )
                 if pdf_info_dict.get(
-                    "need_drop", False
+                    "_need_drop", False
                 ):  # if the returned dict contains need_drop, extract drop_reason and skip this parse
-                    jso["need_drop"] = True
-                    jso["drop_reason"] = pdf_info_dict["drop_reason"]
+                    jso["_need_drop"] = True
+                    jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
                 else:  # normal return: compress and store pdf_info_dict
                     jso["parsed_results"] = convert_to_train_format(pdf_info_dict)
                     pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
......
@@ -17,7 +17,7 @@ def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -45,7 +45,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, mode, debug_mode=
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -78,7 +78,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, de
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -108,7 +108,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -137,7 +137,7 @@ def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) ->
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -165,7 +165,7 @@ def ocr_pdf_intermediate_dict_to_standard_format_with_para(jso: dict, debug_mode
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -221,7 +221,7 @@ def ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name, start_page_
 # dedicated to re-running pdfs that were dropped; after the run, the drop flag must be reset to false
 def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
-    if not jso.get("need_drop", False):
+    if not jso.get("_need_drop", False):
         return jso
     else:
         try:
@@ -233,7 +233,7 @@ def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
             )
             jso["pdf_intermediate_dict"] = JsonCompressor.compress_json(pdf_info_dict)
             jso["parse_time"] = parse_time
-            jso["need_drop"] = False
+            jso["_need_drop"] = False
         except Exception as e:
             jso = exception_handler(jso, e)
         return jso
@@ -244,7 +244,7 @@ def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     try:
         pdf_bytes = get_pdf_bytes(jso)
...
@@ -18,7 +18,7 @@ def txt_pdf_to_standard_format(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop")
             jso["dropped"] = True
@@ -46,7 +46,7 @@ def txt_pdf_to_mm_markdown_format(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug is off, check for the need_drop field
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop")
             jso["dropped"] = True
...
@@ -62,6 +62,6 @@ def pdf_post_filter(page_info) -> tuple:
     """
     bool_is_pseudo_single_column, extra_info = __is_pseudo_single_column(page_info)
     if bool_is_pseudo_single_column:
-        return False, {"need_drop": True, "drop_reason": DropReason.PSEUDO_SINGLE_COLUMN, "extra_info": extra_info}
+        return False, {"_need_drop": True, "_drop_reason": DropReason.PSEUDO_SINGLE_COLUMN, "extra_info": extra_info}
     return True, None
\ No newline at end of file
@@ -3,7 +3,7 @@ from magic_pdf.libs.commons import fitz  # the pyMuPDF library
 from magic_pdf.libs.coordinate_transform import get_scale_ratio

-def parse_footnotes_by_model(page_ID: int, page: fitz.Page, json_from_DocXchain_obj: dict, md_bookname_save_path, debug_mode=False):
+def parse_footnotes_by_model(page_ID: int, page: fitz.Page, json_from_DocXchain_obj: dict, md_bookname_save_path=None, debug_mode=False):
     """
     :param page_ID: int; the current page is page number page_ID of the pdf document.
     :param page: the content of the current page as read by fitz
...
@@ -3,18 +3,16 @@ from magic_pdf.libs.ocr_content_type import ContentType
 from magic_pdf.libs.pdf_image_tools import cut_image

-def cut_image_and_table(spans, page, page_id, book_name, save_path, img_s3_client):
-    def s3_return_path(type):
-        return join_path(book_name, type)
-
-    def img_save_path(type):
-        return join_path(save_path, s3_return_path(type))
+def cut_image_and_table(spans, page, page_id, pdf_bytes_md5, imageWriter):
+    def return_path(type):
+        return join_path(pdf_bytes_md5, type)

     for span in spans:
         span_type = span['type']
         if span_type == ContentType.Image:
-            span['image_path'] = cut_image(span['bbox'], page_id, page, img_save_path('images'), s3_return_path=s3_return_path('images'), img_s3_client=img_s3_client)
+            span['image_path'] = cut_image(span['bbox'], page_id, page, return_path=return_path('images'), imageWriter=imageWriter)
         elif span_type == ContentType.Table:
-            span['image_path'] = cut_image(span['bbox'], page_id, page, img_save_path('tables'), s3_return_path=s3_return_path('tables'), img_s3_client=img_s3_client)
+            span['image_path'] = cut_image(span['bbox'], page_id, page, return_path=return_path('tables'), imageWriter=imageWriter)
     return spans
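This refactor replaces explicit s3 plumbing (book_name, save_path, img_s3_client) with a single imageWriter abstraction keyed by the pdf's md5, so cut-image paths depend only on document content. A hedged sketch of how a caller might derive that key (the bytes are a placeholder; the commented call needs real page objects and a writer):

# Illustrative only: derive the md5 key the new signature expects.
import hashlib

pdf_bytes = b"%PDF-1.4 ..."  # placeholder bytes, not a real document
pdf_bytes_md5 = hashlib.md5(pdf_bytes).hexdigest()
print(pdf_bytes_md5)  # this value becomes the top-level folder for cut images
# spans = cut_image_and_table(spans, page, page_id, pdf_bytes_md5, imageWriter)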
@@ -68,7 +68,7 @@ def pdf_filter(page:fitz.Page, text_blocks, table_bboxes, image_bboxes) -> tuple
     """
     if __is_contain_color_background_rect(page, text_blocks, image_bboxes):
-        return False, {"need_drop": True, "drop_reason": DropReason.COLOR_BACKGROUND_TEXT_BOX}
+        return False, {"_need_drop": True, "_drop_reason": DropReason.COLOR_BACKGROUND_TEXT_BOX}
     return True, None
\ No newline at end of file
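Both pdf_filter and pdf_post_filter share one return contract: (True, None) when the page passes, and (False, drop_info) where drop_info carries the underscore-prefixed keys. A hedged sketch of a consumer of that contract (the stub filter and job dict are placeholders, not from this commit):

# Illustrative consumer of the (is_ok, drop_info) contract; the stub
# stands in for pdf_filter / pdf_post_filter.
def fake_filter(page_info) -> tuple:
    return False, {"_need_drop": True, "_drop_reason": "COLOR_BACKGROUND_TEXT_BOX"}

jso = {"file_id": "demo"}
is_ok, drop_info = fake_filter(None)
if not is_ok:
    jso.update(drop_info)  # merges "_need_drop" / "_drop_reason" onto the record
print(jso)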
from loguru import logger

from magic_pdf.dict2md.mkcontent import mk_universal_format
from magic_pdf.dict2md.ocr_mkcontent import make_standard_format_with_para
from magic_pdf.filter.pdf_classify_by_type import classify
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
from magic_pdf.libs.detect_language_from_model import get_language_from_model
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.json_compressor import JsonCompressor
from magic_pdf.spark.spark_api import parse_union_pdf, parse_ocr_pdf


class UNIPipe:
    def __init__(self):
        pass

    def classify(self, pdf_bytes: bytes) -> str:
        """
        Decide from the pdf metadata whether this is a text pdf or an ocr pdf.
        """
        pdf_meta = pdf_meta_scan(pdf_bytes)
        if pdf_meta.get("_need_drop", False):  # the scan flagged the pdf for dropping, so raise
            raise Exception(f"pdf meta_scan need_drop, reason is {pdf_meta['_drop_reason']}")
        else:
            is_encrypted = pdf_meta["is_encrypted"]
            is_needs_password = pdf_meta["is_needs_password"]
            if is_encrypted or is_needs_password:  # encrypted or password-protected pdfs are not processed
                raise Exception(f"pdf meta_scan need_drop, reason is {DropReason.ENCRYPTED}")
            else:
                is_text_pdf, results = classify(
                    pdf_meta["total_page"],
                    pdf_meta["page_width_pts"],
                    pdf_meta["page_height_pts"],
                    pdf_meta["image_info_per_page"],
                    pdf_meta["text_len_per_page"],
                    pdf_meta["imgs_per_page"],
                    pdf_meta["text_layout_per_page"],
                )
                if is_text_pdf:
                    return "txt"
                else:
                    return "ocr"

    def parse(self, pdf_bytes: bytes, image_writer, jso_useful_key) -> dict:
        """
        Parse the pdf according to its type.
        """
        text_language = get_language_from_model(jso_useful_key['model_list'])
        allow_language = ["zh", "en"]  # allowed languages; currently only Simplified Chinese and English
        logger.info(f"pdf text_language is {text_language}")
        if text_language not in allow_language:  # drop pdfs whose language is not allowed
            raise Exception(f"pdf meta_scan need_drop, reason is {DropReason.NOT_ALLOW_LANGUAGE}")
        else:
            if jso_useful_key['_pdf_type'] == "txt":
                pdf_mid_data = parse_union_pdf(pdf_bytes, jso_useful_key['model_list'], image_writer)
            elif jso_useful_key['_pdf_type'] == "ocr":
                pdf_mid_data = parse_ocr_pdf(pdf_bytes, jso_useful_key['model_list'], image_writer)
            else:
                raise Exception("pdf type is not txt or ocr")
            return JsonCompressor.compress(pdf_mid_data)

    def mk_uni_format(self, pdf_mid_data: str, img_buket_path: str) -> list:
        """
        Build the unified-format content_list according to the pdf parse type.
        """
        pdf_mid_data = JsonCompressor.decompress_json(pdf_mid_data)
        parse_type = pdf_mid_data["_parse_type"]
        if parse_type == "txt":
            content_list = mk_universal_format(pdf_mid_data, img_buket_path)
        elif parse_type == "ocr":
            content_list = make_standard_format_with_para(pdf_mid_data, img_buket_path)
        return content_list


if __name__ == '__main__':
    # test
    pipe = UNIPipe()
    pdf_bytes = open(r"D:\project\20231108code-clean\magic_pdf\tmp\unittest\download-pdfs\数学新星网\edu_00001544.pdf",
                     "rb").read()
    pdf_type = pipe.classify(pdf_bytes)
    logger.info(f"pdf_type is {pdf_type}")
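The three methods are designed to chain: classify the raw bytes, parse with the matching route, then flatten to the unified content list. A sketch of that flow (image_writer, the pdf path, the empty model_list, and the bucket path are assumptions for illustration; a real run needs a concrete AbsReaderWriter and genuine layout-model output):

# Assumed end-to-end use of UNIPipe; inputs below are placeholders.
pipe = UNIPipe()
pdf_bytes = open("example.pdf", "rb").read()          # placeholder path
jso_useful_key = {
    "_pdf_type": pipe.classify(pdf_bytes),            # "txt" or "ocr"
    "model_list": [],                                 # placeholder; real layout output required
}
compressed = pipe.parse(pdf_bytes, image_writer, jso_useful_key)  # image_writer: an AbsReaderWriter
content_list = pipe.mk_uni_format(compressed, "s3://bucket/images")
print(f"got {len(content_list)} content items")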
@@ -26,9 +26,9 @@ def get_bookid(jso: dict):

 def exception_handler(jso: dict, e):
     logger.exception(e)
-    jso["need_drop"] = True
-    jso["drop_reason"] = DropReason.Exception
-    jso["exception"] = f"ERROR: {e}"
+    jso["_need_drop"] = True
+    jso["_drop_reason"] = DropReason.Exception
+    jso["_exception"] = f"ERROR: {e}"
     return jso
...
@@ -12,27 +12,86 @@
 Everything else, such as constructing the s3cli and obtaining the ak/sk, is implemented over in code-clean. Do not introduce reverse dependencies!!!
 """
+from loguru import logger

 from magic_pdf.io import AbsReaderWriter
+from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
+from magic_pdf.pdf_parse_by_txt import parse_pdf_by_txt


 def parse_txt_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
     """
     Parse a text pdf.
     """
-    pass
+    pdf_info_dict = parse_pdf_by_txt(
+        pdf_bytes,
+        pdf_models,
+        imageWriter,
+        start_page_id=start_page,
+        debug_mode=is_debug,
+    )
+    pdf_info_dict["parse_type"] = "txt"
+    return pdf_info_dict


 def parse_ocr_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
     """
     Parse an ocr pdf.
     """
-    pass
+    pdf_info_dict = parse_pdf_by_ocr(
+        pdf_bytes,
+        pdf_models,
+        imageWriter,
+        start_page_id=start_page,
+        debug_mode=is_debug,
+    )
+    pdf_info_dict["_parse_type"] = "ocr"
+    return pdf_info_dict


 def parse_union_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
     """
     A pdf mixing ocr and text content: parse all of it.
     """
-    pass
\ No newline at end of file
+    def parse_pdf(method):
+        try:
+            return method(
+                pdf_bytes,
+                pdf_models,
+                imageWriter,
+                start_page_id=start_page,
+                debug_mode=is_debug,
+            )
+        except Exception as e:
+            logger.error(f"{method.__name__} error: {e}")
+            return None
+
+    pdf_info_dict = parse_pdf(parse_pdf_by_txt)
+    if pdf_info_dict is None or pdf_info_dict.get("_need_drop", False):
+        logger.warning(f"parse_pdf_by_txt drop or error, switch to parse_pdf_by_ocr")
+        pdf_info_dict = parse_pdf(parse_pdf_by_ocr)
+        if pdf_info_dict is None:
+            raise Exception("Both parse_pdf_by_txt and parse_pdf_by_ocr failed.")
+        else:
+            pdf_info_dict["_parse_type"] = "ocr"
+    else:
+        pdf_info_dict["_parse_type"] = "txt"
+    return pdf_info_dict
+
+
+def spark_json_extractor(jso: dict) -> dict:
+    """
+    Extract data from the json and return a dict.
+    """
+    return {
+        "_pdf_type": jso["_pdf_type"],
+        "model_list": jso["doc_layout_result"],
+    }
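parse_union_pdf is a txt-first strategy: any exception or _need_drop result from the text route triggers one whole-document re-parse through ocr, which keeps the two parsers independent at the cost of re-parsing every page. A hedged call sketch (the bytes, model list, and writer are placeholders):

# Illustrative only; a real call passes genuine model output and a writer.
pdf_info_dict = parse_union_pdf(pdf_bytes, [], image_writer)
assert pdf_info_dict["_parse_type"] in ("txt", "ocr")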
@@ -16,3 +16,5 @@ en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_
 zh_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/zh_core_web_sm-3.7.0/zh_core_web_sm-3.7.0-py3-none-any.whl
 scikit-learn==1.4.1.post1
 nltk==3.8.1
+s3pathlib>=2.1.1
{
"accuracy": 1.0,
"precision": 1.0,
"recall": 1.0,
"f1_score": 1.0,
"pdf间的平均编辑距离": 19.82051282051282,
"pdf间的平均bleu": 0.9002485609584511,
"阅读顺序编辑距离": 0.3176895306859206,
"分段准确率": 0.8989169675090253,
"行内公式准确率": {
"accuracy": 0.9782741738066095,
"precision": 0.9782741738066095,
"recall": 1.0,
"f1_score": 0.9890177880897139
},
"行内公式编辑距离": 0.0,
"行内公式bleu": 0.20340450120213166,
"行间公式准确率": {
"accuracy": 1.0,
"precision": 1.0,
"recall": 1.0,
"f1_score": 1.0
},
"行间公式编辑距离": 0.0,
"行间公式bleu": 0.3662262622386575,
"丢弃文本准确率": {
"accuracy": 0.867870036101083,
"precision": 0.9064856711915535,
"recall": 0.9532117367168914,
"f1_score": 0.9292616930807885
},
"丢弃文本标签准确率": {
"color_background_header_txt_block": {
"precision": 0.0,
"recall": 0.0,
"f1-score": 0.0,
"support": 41.0
},
"rotate": {
"precision": 1.0,
"recall": 0.9682539682539683,
"f1-score": 0.9838709677419355,
"support": 63.0
},
"footnote": {
"precision": 1.0,
"recall": 0.883495145631068,
"f1-score": 0.9381443298969072,
"support": 103.0
},
"header": {
"precision": 1.0,
"recall": 1.0,
"f1-score": 1.0,
"support": 4.0
},
"on-image": {
"precision": 0.9947643979057592,
"recall": 1.0,
"f1-score": 0.9973753280839895,
"support": 380.0
},
"on-table": {
"precision": 1.0,
"recall": 0.9443609022556391,
"f1-score": 0.97138437741686,
"support": 665.0
},
"micro avg": {
"precision": 0.9982847341337907,
"recall": 0.9267515923566879,
"f1-score": 0.9611890999174236,
"support": 1256.0
}
},
"丢弃图片准确率": {
"accuracy": 0.8666666666666667,
"precision": 0.9285714285714286,
"recall": 0.9285714285714286,
"f1_score": 0.9285714285714286
},
"丢弃表格准确率": {
"accuracy": 0,
"precision": 0,
"recall": 0,
"f1_score": 0
}
}
\ No newline at end of file