Vector.
@@ -1,29 +1,25 @@
# app/agents/vectorization_agent.py
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
import chromadb

client = chromadb.PersistentClient(path="./chroma_db")
vector_store = client.get_or_create_collection(name="documents")

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=100,
)

def agent_vectorize_and_store(doc_id: str, text: str, category: str, language: str):
    """
    Agent 4: Vectorizes a document and stores it in ChromaDB.
    """
def agent_vectorize_and_store(
    doc_id: str,
    text: str,
    category: str,
    language: str,
    embedding_model,
    vector_store
):
    print(f"--- [Background Task] Starting vectorization (ID: {doc_id})...")

    try:
        return

        chunks = text_splitter.split_text(text)
        if not chunks:
            print(f"--- [Background Task] document {doc_id} has no text to vectorize.")
            print(f"--- [Background task] document is empty, skip vectorization. (ID: {doc_id})")
            return

        chunk_ids = [f"{doc_id}_{i}" for i in range(len(chunks))]
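
The lines elided between this hunk and the next presumably build the per-chunk metadata, embed the chunks, and write everything to the collection. A minimal sketch of that step, assuming the metadata field layout and an explicit embed_documents call, neither of which is shown in the diff:

        # Hypothetical reconstruction of the elided body of agent_vectorize_and_store.
        # The metadata fields and the embed_documents call are assumptions, not from the commit.
        metadatas = [
            {"doc_id": doc_id, "category": category, "language": language}
            for _ in chunks
        ]
        embeddings = embedding_model.embed_documents(chunks)  # one vector per chunk
        vector_store.add(
            ids=chunk_ids,
            embeddings=embeddings,
            documents=chunks,
            metadatas=metadatas
        )
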
@@ -38,6 +34,6 @@ def agent_vectorize_and_store(doc_id: str, text: str, category: str, language: s
            documents=chunks,
            metadatas=metadatas
        )
        print(f"--- [Background Task] Document {doc_id} vectorized and stored successfully.")
        print(f"--- [Background Task] Document {doc_id} vectorized.")
    except Exception as e:
        print(f"--- [background Task] Vectorization failed (ID: {doc_id}): {e}")
        print(f"--- [Background Task] Document vectorization failed (ID: {doc_id}): {e}")
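
For reference, a direct call to the refactored agent would look roughly like the following. In the application itself the call is scheduled through FastAPI's BackgroundTasks (see the router hunk further down); the module paths are inferred from the file comments and the argument values are placeholders, none of which appears in this commit:

# Hypothetical direct invocation of the refactored agent.
from app.agents.vectorization_agent import agent_vectorize_and_store
from app.core.vector_store import embedding_model, vector_store

agent_vectorize_and_store(
    doc_id="doc-123",                                    # example values only
    text="Full OCR text of the uploaded document ...",
    category="contract",
    language="en",
    embedding_model=embedding_model,
    vector_store=vector_store,
)
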

@@ -1,47 +1,28 @@
# app/core/vector_store.py
import os
import chromadb
from dotenv import load_dotenv
from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings

load_dotenv()

LLM_PROVIDER = os.getenv("LLM_PROVIDER", "openai").lower()

embedding_model = None

print(f"--- [Core] Initializing Embeddings with provider: {LLM_PROVIDER} ---")

if LLM_PROVIDER == "azure":
    required_vars = [
        "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_API_KEY",
        "OPENAI_API_VERSION", "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"
    ]
    if not all(os.getenv(var) for var in required_vars):
        raise ValueError("One or more Azure OpenAI environment variables for embeddings are not set.")

    embedding_model = AzureOpenAIEmbeddings(
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
        api_key=os.getenv("AZURE_OPENAI_API_KEY"),
        api_version=os.getenv("OPENAI_API_VERSION"),
        azure_deployment=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"),
    )

elif LLM_PROVIDER == "openai":
    if not os.getenv("OPENAI_API_KEY"):
        raise ValueError("OPENAI_API_KEY is not set for the 'openai' provider.")

    embedding_model = OpenAIEmbeddings(
        api_key=os.getenv("OPENAI_API_KEY"),
        model=os.getenv("OPENAI_EMBEDDING_MODEL_NAME", "text-embedding-3-small")
    )

else:
    raise ValueError(f"Unsupported LLM_PROVIDER: {LLM_PROVIDER}. Please use 'azure' or 'openai'.")

    raise ValueError(f"Unsupported LLM_PROVIDER: {LLM_PROVIDER}.")

client = chromadb.PersistentClient(path="./chroma_db")
vector_store = client.get_or_create_collection(
    name="documents",
    metadata={"hnsw:space": "cosine"}
)
vector_store = client.get_or_create_collection(name="documents")
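
With this module in place, downstream code imports the shared embedding_model and vector_store instead of constructing its own. A minimal retrieval sketch against this collection; the query text and the metadata filter field are assumptions and do not appear in the commit:

# Hypothetical retrieval example using the shared objects from app/core/vector_store.py.
from app.core.vector_store import embedding_model, vector_store

query_embedding = embedding_model.embed_query("termination clause")  # example query text
results = vector_store.query(
    query_embeddings=[query_embedding],
    n_results=3,
    where={"language": "en"},  # metadata filter; the field name is an assumption
)
print(results["documents"])
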

@@ -5,10 +5,10 @@ from typing import Dict, Any, List
from fastapi.concurrency import run_in_threadpool
from PIL import Image
from io import BytesIO

from .. import agents
from ..core.pdf_processor import convert_pdf_to_images, image_to_base64_str
from ..core.ocr import extract_text_from_images
from ..core.vector_store import embedding_model, vector_store

# Create an APIRouter instance
router = APIRouter(

@@ -102,10 +102,12 @@ async def upload_and_process_document(
    full_text = await run_in_threadpool(extract_text_from_images, images)
    background_tasks.add_task(
        agents.agent_vectorize_and_store,
        doc_id,
        full_text,
        category,
        language
        doc_id=doc_id,
        text=full_text,
        category=category,
        language=language,
        embedding_model=embedding_model,
        vector_store=vector_store
    )
    print("--- [Main] Vectorization job added to background tasks.")

@@ -118,4 +120,4 @@ async def upload_and_process_document(
async def get_result(doc_id: str):
    if doc_id in db_results:
        return db_results[doc_id]
    raise HTTPException(status_code=404, detail="Document not found.")
    raise HTTPException(status_code=404, detail="Document not found.")
Binary file not shown.
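
Taken together, a client would upload a PDF and then poll for the processed result, since vectorization runs as a background task. A hypothetical end-to-end flow with the requests library; the route paths, form field name, and response fields are assumptions, as they are not visible in this diff:

# Hypothetical client flow; endpoint paths and response fields are assumptions.
import time
import requests

BASE = "http://localhost:8000"

with open("sample.pdf", "rb") as f:
    resp = requests.post(
        f"{BASE}/documents/upload",
        files={"file": ("sample.pdf", f, "application/pdf")},
    )
resp.raise_for_status()
doc_id = resp.json()["doc_id"]  # response field name assumed

# Poll until the background vectorization has produced a result.
for _ in range(30):
    r = requests.get(f"{BASE}/documents/result/{doc_id}")
    if r.status_code == 200:
        print(r.json())
        break
    time.sleep(2)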