# app/agents/vectorization_agent.py
from langchain.text_splitter import RecursiveCharacterTextSplitter

from ..core.vector_store import vector_store, embedding_model

# Initialize the text splitter to divide long documents into smaller chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=50,
)


def agent_vectorize_and_store(doc_id: str, text: str, category: str):
    """Agent 4: Vectorization and Storage (Real Implementation)"""
    print(f"--- [Agent 4] Vectorizing document (ID: {doc_id})...")

    # 1. Split the document text into chunks
    chunks = text_splitter.split_text(text)
    print(f"--- [Agent 4] Document split into {len(chunks)} chunks.")

    if not chunks:
        print("--- [Agent 4] Document is empty, skipping vectorization.")
        return

    # 2. Create a unique ID and metadata for each chunk
    chunk_ids = [f"{doc_id}_{i}" for i in range(len(chunks))]
    metadatas = [
        {"doc_id": doc_id, "category": category, "chunk_number": i}
        for i in range(len(chunks))
    ]

    # 3. Use the embedding model to generate vectors for all chunks
    embeddings = embedding_model.embed_documents(chunks)

    # 4. Add the IDs, vectors, metadata, and text chunks to ChromaDB
    vector_store.add(
        ids=chunk_ids,
        embeddings=embeddings,
        documents=chunks,
        metadatas=metadatas,
    )

    print(f"--- [Agent 4] Document {doc_id} stored in ChromaDB.")
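

# Example usage: a minimal, hypothetical sketch of how the agent might be
# invoked. The doc_id, text, and category values below are illustrative only
# and do not come from the rest of the application.
if __name__ == "__main__":
    sample_text = (
        "ChromaDB is an open-source vector database. "
        "It stores embeddings alongside their source documents and metadata, "
        "which makes it a convenient backend for retrieval-augmented generation."
    )
    agent_vectorize_and_store(
        doc_id="doc-001",
        text=sample_text,
        category="database",
    )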