Prepare vectorization

2025-08-11 16:42:36 +02:00
parent f077c6351d
commit 0c6d008368
5 changed files with 79 additions and 38 deletions

app/agents/__init__.py

@@ -1,4 +1,5 @@
 # app/agents/__init__.py
 from .classification_agent import agent_classify_document_from_image
 from .receipt_agent import agent_extract_receipt_info
-from .invoice_agent import agent_extract_invoice_info
+from .invoice_agent import agent_extract_invoice_info
+from .vectorization_agent import agent_vectorize_and_store

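The refactored agent in the next file imports vector_store and embedding_model from a new shared module, ..core.vector_store, which is presumably among the commit's five changed files but is not shown here. Based on the initialization lines removed from the agent, a minimal sketch of what app/core/vector_store.py likely contains (a reconstruction, not the actual file):

# app/core/vector_store.py -- hypothetical reconstruction; this file is not shown in the diff
import chromadb
from langchain_openai import OpenAIEmbeddings

# Shared embedding model, matching the one previously created inside the agent
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")

# Shared persistent ChromaDB collection, also lifted out of the agent
client = chromadb.PersistentClient(path="./chroma_db")
vector_store = client.get_or_create_collection(name="documents")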
app/agents/vectorization_agent.py

@@ -1,38 +1,43 @@
 # app/agents/vectorization_agent.py
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_openai import OpenAIEmbeddings
-embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
-import chromadb
-client = chromadb.PersistentClient(path="./chroma_db")
-vector_store = client.get_or_create_collection(name="documents")
+from ..core.vector_store import vector_store, embedding_model
 # Initialize the text splitter to divide long documents into smaller chunks
 text_splitter = RecursiveCharacterTextSplitter(
-    chunk_size=500,
-    chunk_overlap=50,
+    chunk_size=1000,
+    chunk_overlap=100,
 )
-def agent_vectorize_and_store(doc_id: str, text: str, category: str):
-    """Agent 4: Vectorization and Storage (Real Implementation)"""
-    print(f"--- [Agent 4] Vectorizing document (ID: {doc_id})...")
+def agent_vectorize_and_store(doc_id: str, text: str, category: str, language: str):
+    """
+    Agent 4: Vectorizes a document and stores it in ChromaDB.
+    """
+    print(f"--- [Background Task] Starting vectorization (ID: {doc_id})...")
     # 1. Split the document text into chunks
-    chunks = text_splitter.split_text(text)
-    print(f"--- [Agent 4] Document split into {len(chunks)} chunks.")
-    if not chunks:
-        print(f"--- [Agent 4] Document is empty, skipping vectorization.")
-        return
+    try:
+        chunks = text_splitter.split_text(text)
+        if not chunks:
+            print(f"--- [Background Task] Document {doc_id} has no text to vectorize.")
+            return
     # 2. Create a unique ID and metadata for each chunk
-    chunk_ids = [f"{doc_id}_{i}" for i in range(len(chunks))]
-    metadatas = [{"doc_id": doc_id, "category": category, "chunk_number": i} for i in range(len(chunks))]
+        chunk_ids = [f"{doc_id}_{i}" for i in range(len(chunks))]
+        metadatas = [{"doc_id": doc_id, "category": category, "language": language, "chunk_number": i} for i in
+                     range(len(chunks))]
     # 3. Use an embedding model to generate vectors for all chunks
-    embeddings = embedding_model.embed_documents(chunks)
+        embeddings = embedding_model.embed_documents(chunks)
     # 4. Add the IDs, vectors, metadata, and text chunks to ChromaDB
-    vector_store.add(
-        ids=chunk_ids,
-        embeddings=embeddings,
-        documents=chunks,
-        metadatas=metadatas
-    )
-    print(f"--- [Agent 4] Document {doc_id} stored in ChromaDB.")
+        vector_store.add(
+            ids=chunk_ids,
+            embeddings=embeddings,
+            documents=chunks,
+            metadatas=metadatas
+        )
+        print(f"--- [Background Task] Document {doc_id} vectorized and stored successfully.")
+    except Exception as e:
+        print(f"--- [Background Task] Vectorization failed (ID: {doc_id}): {e}")
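The log prefix change from "[Agent 4]" to "[Background Task]" suggests the function now runs off the request path rather than inline. A hypothetical caller sketch, assuming a FastAPI endpoint and its BackgroundTasks facility (the framework, the endpoint name, and the placeholder values are my assumptions, not part of this commit):

# Hypothetical caller -- framework, endpoint, and values assumed for illustration
from fastapi import BackgroundTasks, FastAPI

from app.agents import agent_vectorize_and_store

app = FastAPI()

@app.post("/documents")
async def upload_document(background_tasks: BackgroundTasks):
    # Upstream agents (classification, extraction) would produce these values.
    doc_id, text, category, language = "doc-123", "...extracted text...", "invoice", "en"
    # Run vectorization after the response is sent; note the new `language`
    # argument required by this commit's signature change.
    background_tasks.add_task(agent_vectorize_and_store, doc_id, text, category, language)
    return {"doc_id": doc_id, "status": "vectorization scheduled"}

Storing language in each chunk's metadata pays off at query time, since ChromaDB can filter a similarity search on it:

# Filtered retrieval sketch: restrict a similarity search to one language
results = vector_store.query(
    query_embeddings=[embedding_model.embed_query("total amount due")],
    n_results=5,
    where={"language": "en"},
)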