Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix markdown reader image path #334

Open
wants to merge 4 commits into
base: feature
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 1 addition & 4 deletions src/pai_rag/app/api/query.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,10 +187,7 @@ async def upload_data(
for file in files:
fn = file.filename
data = await file.read()
file_hash = hashlib.md5(data).hexdigest()
tmp_file_dir = os.path.join(
tmpdir, f"{COMMON_FILE_PATH_FODER_NAME}/{file_hash}"
)
tmp_file_dir = os.path.join(tmpdir, f"{COMMON_FILE_PATH_FODER_NAME}")
os.makedirs(tmp_file_dir, exist_ok=True)
save_file = os.path.join(tmp_file_dir, fn)

Expand Down
6 changes: 1 addition & 5 deletions src/pai_rag/app/api/v1/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
from typing import Any, List
from fastapi import APIRouter, Body, BackgroundTasks, UploadFile, Form
import uuid
import hashlib
import os
import tempfile
import shutil
Expand Down Expand Up @@ -174,10 +173,7 @@ async def upload_data(
for file in files:
fn = file.filename
data = await file.read()
file_hash = hashlib.md5(data).hexdigest()
tmp_file_dir = os.path.join(
tmpdir, f"{COMMON_FILE_PATH_FODER_NAME}/{file_hash}"
)
tmp_file_dir = os.path.join(tmpdir, f"{COMMON_FILE_PATH_FODER_NAME}")
os.makedirs(tmp_file_dir, exist_ok=True)
save_file = os.path.join(tmp_file_dir, fn)

Expand Down
71 changes: 47 additions & 24 deletions src/pai_rag/integrations/readers/pai_html_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,26 +3,28 @@
"""
import html2text
from bs4 import BeautifulSoup
import requests
from typing import Dict, List, Optional, Union, Any
from io import BytesIO
from pai_rag.utils.markdown_utils import (
transform_local_to_oss,
convert_table_to_markdown,
PaiTable,
)
from pathlib import Path
import re
import time
import os
from PIL import Image
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from loguru import logger


IMAGE_URL_PATTERN = (
r"!\[(?P<alt_text>.*?)\]\((https?://[^\s]+?[\s\w.-]*\.(jpg|jpeg|png|gif|bmp))\)"
MARKDOWN_IMAGE_PATTERN = re.compile(
r"!\[.*?\]\(((?!https?://|www\.)[^\s)]+\.(?:png|jpe?g|gif|bmp|svg|webp|tiff))\)",
re.IGNORECASE,
)
HTML_IMAGE_PATTERN = re.compile(
r'<img[^>]*src=["\']((?!https?://|www\.)[^"\']+\.(?:png|jpe?g|gif|bmp|svg|webp|tiff))["\'][^>]*>',
re.IGNORECASE,
)


Expand Down Expand Up @@ -149,32 +151,52 @@ def _convert_table_to_markdown(self, table):
table, total_cols = self._convert_table_to_pai_table(table)
return convert_table_to_markdown(table, total_cols)

def _transform_local_to_oss(self, html_name: str, image_url: str):
response = requests.get(image_url)
response.raise_for_status() # 检查请求是否成功
def _transform_local_to_oss(self, html_name: str, local_url: str):
    """Upload a local image referenced by an HTML document to OSS.

    Opens the image at ``local_url`` with PIL and hands it to
    ``transform_local_to_oss`` together with the sanitized document name,
    which returns the uploaded object's URL.

    Args:
        html_name: Basename of the source HTML file (used to namespace
            the uploaded object).
        local_url: Filesystem path of the image to upload.

    Returns:
        The OSS URL string on success, or ``None`` when the image cannot
        be read or uploaded (best-effort: the error is logged, never
        raised, so callers can drop the broken image reference).
    """
    try:
        # Use a context manager so the file handle PIL holds open is
        # released promptly instead of leaking until GC.
        with Image.open(local_url) as image:
            return transform_local_to_oss(self._oss_cache, image, html_name)
    except Exception as e:
        # Broad catch is deliberate: a single unreadable image must not
        # abort conversion of the whole document.
        logger.error(f"read html local image failed: {e}")
        return None

def _replace_image_paths(self, html_dir: str, html_name: str, content: str):
markdown_image_matches = MARKDOWN_IMAGE_PATTERN.finditer(content)
html_image_matches = HTML_IMAGE_PATTERN.finditer(content)
for match in markdown_image_matches:
full_match = match.group(0) # 整个匹配
local_url = match.group(1) # 捕获的URL

# 将二进制数据转换为图像对象
image = Image.open(BytesIO(response.content))
return transform_local_to_oss(self._oss_cache, image, html_name)
local_path = os.path.normpath(os.path.join(html_dir, local_url))

def _replace_image_paths(self, html_name: str, content: str):
image_pattern = IMAGE_URL_PATTERN
matches = re.findall(image_pattern, content)
for alt_text, image_url, image_type in matches:
if self._oss_cache:
time_tag = int(time.time())
oss_url = self._transform_local_to_oss(html_name, image_url)
updated_alt_text = f"pai_rag_image_{time_tag}_{alt_text}"
content = content.replace(
f"![{alt_text}]({image_url})", f"![{updated_alt_text}]({oss_url})"
)
oss_url = self._transform_local_to_oss(html_name, local_path)
if oss_url:
content = content.replace(local_url, oss_url)
else:
content = content.replace(full_match, "")
else:
content = content.replace(f"![{alt_text}]({image_url})", "")
content = content.replace(full_match, "")
for match in html_image_matches:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这段和上面165-178行看起来一模一样?可以写到一起吗

full_match = match.group(0) # 整个匹配
local_url = match.group(1) # 捕获的URL

local_path = os.path.normpath(os.path.join(html_dir, local_url))

if self._oss_cache:
oss_url = self._transform_local_to_oss(html_name, local_path)
if oss_url:
content = content.replace(local_url, oss_url)
else:
content = content.replace(full_match, "")
else:
content = content.replace(full_match, "")

return content

def convert_html_to_markdown(self, html_path):
html_name = os.path.basename(html_path).split(".")[0]
html_name = html_name.replace(" ", "_")
html_dir = os.path.dirname(html_path)
try:
with open(html_path, "r", encoding="utf-8") as f:
html_content = f.read()
Expand All @@ -194,8 +216,9 @@ def convert_html_to_markdown(self, html_path):
table_markdown = self._convert_table_to_markdown(table) + "\n\n"
placeholder = f"<!-- TABLE_PLACEHOLDER_{id(table)} -->"
markdown_content = markdown_content.replace(placeholder, table_markdown)

markdown_content = self._replace_image_paths(html_name, markdown_content)
markdown_content = self._replace_image_paths(
html_dir, html_name, markdown_content
)

return markdown_content

Expand Down
13 changes: 9 additions & 4 deletions src/pai_rag/integrations/readers/pai_markdown_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,15 +37,17 @@ def __init__(
f"PaiMarkdownReader created with enable_table_summary : {self.enable_table_summary}"
)

def replace_image_paths(self, markdown_name: str, content: str):
def replace_image_paths(self, markdown_dir: str, markdown_name: str, content: str):
markdown_image_matches = MARKDOWN_IMAGE_PATTERN.finditer(content)
html_image_matches = HTML_IMAGE_PATTERN.finditer(content)
for match in markdown_image_matches:
full_match = match.group(0) # 整个匹配
local_url = match.group(1) # 捕获的URL

local_path = os.path.normpath(os.path.join(markdown_dir, local_url))

if self._oss_cache:
oss_url = self._transform_local_to_oss(markdown_name, local_url)
oss_url = self._transform_local_to_oss(markdown_name, local_path)
if oss_url:
content = content.replace(local_url, oss_url)
else:
Expand All @@ -56,8 +58,10 @@ def replace_image_paths(self, markdown_name: str, content: str):
full_match = match.group(0) # 整个匹配
local_url = match.group(1) # 捕获的URL

local_path = os.path.normpath(os.path.join(markdown_dir, local_url))

if self._oss_cache:
oss_url = self._transform_local_to_oss(markdown_name, local_url)
oss_url = self._transform_local_to_oss(markdown_name, local_path)
if oss_url:
content = content.replace(local_url, oss_url)
else:
Expand All @@ -78,6 +82,7 @@ def _transform_local_to_oss(self, markdown_name: str, local_url: str):
def parse_markdown(self, markdown_path):
markdown_name = os.path.basename(markdown_path).split(".")[0]
markdown_name = markdown_name.replace(" ", "_")
markdown_dir = os.path.dirname(markdown_path)
text = ""
pre_line = ""
with open(markdown_path) as fp:
Expand Down Expand Up @@ -108,7 +113,7 @@ def parse_markdown(self, markdown_path):
line = fp.readline()

text += pre_line
md_content = self.replace_image_paths(markdown_name, text)
md_content = self.replace_image_paths(markdown_dir, markdown_name, text)
return md_content

def load_data(
Expand Down
Loading