2025-08-11 14:20:56 +02:00
parent 0a80400720
commit f077c6351d
17 changed files with 165 additions and 248 deletions

app/core/callbacks.py Normal file

@@ -0,0 +1,18 @@
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult


class TokenUsageCallbackHandler(BaseCallbackHandler):
    """Prints the token usage reported by the LLM after every call."""

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        # llm_output can be None for providers that do not report usage.
        token_usage = (response.llm_output or {}).get('token_usage', {})
        if token_usage:
            prompt_tokens = token_usage.get('prompt_tokens', 0)
            completion_tokens = token_usage.get('completion_tokens', 0)
            total_tokens = token_usage.get('total_tokens', 0)
            print("--- [Token Usage] ---")
            print(f"  Prompt Tokens: {prompt_tokens}")
            print(f"  Completion Tokens: {completion_tokens}")
            print(f"  Total Tokens: {total_tokens}")
            print("---------------------")

app/core/llm.py

@@ -1,45 +1,33 @@
# app/core/llm.py
import os

from dotenv import load_dotenv
from langchain_openai import AzureChatOpenAI, ChatOpenAI

from .callbacks import TokenUsageCallbackHandler

# Load environment variables from the .env file
load_dotenv()

# Read the configured LLM provider
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "openai").lower()

llm = None
print(f"--- [Core] Initializing LLM with provider: {LLM_PROVIDER} ---")

# Shared callback that reports token usage after every LLM call
token_callback = TokenUsageCallbackHandler()

if LLM_PROVIDER == "azure":
    # --- Azure OpenAI configuration ---
    required_vars = [
        "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_API_KEY",
        "OPENAI_API_VERSION", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"
    ]
    if not all(os.getenv(var) for var in required_vars):
        raise ValueError("One or more Azure OpenAI environment variables for chat are not set.")
    llm = AzureChatOpenAI(
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
        api_key=os.getenv("AZURE_OPENAI_API_KEY"),
        api_version=os.getenv("OPENAI_API_VERSION"),
        azure_deployment=os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"),
        temperature=0,
        callbacks=[token_callback]
    )
elif LLM_PROVIDER == "openai":
    # --- Standard OpenAI configuration ---
    if not os.getenv("OPENAI_API_KEY"):
        raise ValueError("OPENAI_API_KEY is not set for the 'openai' provider.")
    llm = ChatOpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
        model_name=os.getenv("OPENAI_MODEL_NAME", "gpt-4o"),
        temperature=0,
        callbacks=[token_callback]
    )
else:
    raise ValueError(f"Unsupported LLM_PROVIDER: {LLM_PROVIDER}. Please use 'azure' or 'openai'.")

app/core/ocr.py

@@ -1,27 +0,0 @@
# app/core/ocr.py
import pytesseract
from PIL import Image

# Note: Google's Tesseract OCR engine must be installed on your system first.
# See the earlier installation notes for details.

def extract_text_from_image(image: Image.Image) -> str:
    """
    Extract text from a Pillow Image object using Tesseract OCR.

    Args:
        image: A Pillow Image object.

    Returns:
        The text extracted from the image as a string.
    """
    try:
        print("--- [Core OCR] Extracting text from the image for classification...")
        # lang='chi_sim+eng' recognizes Simplified Chinese and English together
        text = pytesseract.image_to_string(image, lang='chi_sim+eng')
        print("--- [Core OCR] Text extraction succeeded.")
        return text
    except Exception as e:
        print(f"--- [Core OCR] OCR processing failed: {e}")
        raise IOError(f"OCR processing failed: {e}")

View File

@@ -5,39 +5,20 @@ from io import BytesIO
from typing import List
import base64
# Note: Poppler must be installed and available on the system PATH.
# - macOS: brew install poppler
# - Ubuntu/Debian: sudo apt-get install poppler-utils
# - Windows: download Poppler and add its bin directory to the system PATH.

def convert_pdf_to_images(pdf_bytes: bytes) -> List[Image.Image]:
    """Convert the bytes of a PDF file into a list of Pillow Image objects."""
    try:
        print("--- [Core PDF] Converting PDF to images...")
        images = convert_from_bytes(pdf_bytes)
        print(f"--- [Core PDF] Converted PDF to images, {len(images)} pages in total.")
        return images
    except Exception as e:
        print(f"--- [Core PDF] PDF conversion failed: {e}")
        raise IOError(f"PDF to image conversion failed: {e}")

def image_to_base64_str(image: Image.Image) -> str:
    """Convert a Pillow Image object into a Base64-encoded string."""
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode('utf-8')
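A minimal usage sketch (hypothetical file path) combining the two helpers to turn a PDF on disk into one Base64-encoded string per page:

with open("sample.pdf", "rb") as f:  # hypothetical path
    pages = convert_pdf_to_images(f.read())

encoded_pages = [image_to_base64_str(page) for page in pages]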

View File

@@ -4,10 +4,8 @@ import chromadb
from dotenv import load_dotenv
from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings
# Load environment variables from the .env file
load_dotenv()
# Read the configured LLM provider
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "openai").lower()
embedding_model = None
@@ -15,7 +13,6 @@ embedding_model = None
print(f"--- [Core] Initializing Embeddings with provider: {LLM_PROVIDER} ---")
if LLM_PROVIDER == "azure":
    # --- Azure OpenAI configuration ---
    required_vars = [
        "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_API_KEY",
        "OPENAI_API_VERSION", "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"
@@ -31,7 +28,6 @@ if LLM_PROVIDER == "azure":
    )
elif LLM_PROVIDER == "openai":
    # --- Standard OpenAI configuration ---
    if not os.getenv("OPENAI_API_KEY"):
        raise ValueError("OPENAI_API_KEY is not set for the 'openai' provider.")
@@ -44,7 +40,6 @@ else:
raise ValueError(f"Unsupported LLM_PROVIDER: {LLM_PROVIDER}. Please use 'azure' or 'openai'.")
# 初始化ChromaDB客户端 (无变化)
client = chromadb.PersistentClient(path="./chroma_db")
vector_store = client.get_or_create_collection(
name="documents",