# app/agents/vectorization_agent.py

import chromadb
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings

# Embedding model used to vectorize document chunks.
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")

# Persistent ChromaDB client; the "documents" collection is created on first use.
client = chromadb.PersistentClient(path="./chroma_db")
vector_store = client.get_or_create_collection(name="documents")

# Split text into ~1000-character chunks with 100 characters of overlap so
# context is preserved across chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=100,
)

def agent_vectorize_and_store(doc_id: str, text: str, category: str, language: str):
    """
    Agent 4: Vectorizes a document and stores it in ChromaDB.
    """
    print(f"--- [Background Task] Starting vectorization (ID: {doc_id})...")

    try:
        chunks = text_splitter.split_text(text)
        if not chunks:
            print(f"--- [Background Task] Document {doc_id} has no text to vectorize.")
            return

        # One stable ID per chunk, plus metadata that allows filtered retrieval later.
        chunk_ids = [f"{doc_id}_{i}" for i in range(len(chunks))]
        metadatas = [
            {"doc_id": doc_id, "category": category, "language": language, "chunk_number": i}
            for i in range(len(chunks))
        ]

        # Embed all chunks in one batched call, then persist them to ChromaDB.
        embeddings = embedding_model.embed_documents(chunks)

        vector_store.add(
            ids=chunk_ids,
            embeddings=embeddings,
            documents=chunks,
            metadatas=metadatas,
        )
        print(f"--- [Background Task] Document {doc_id} vectorized and stored successfully.")
    except Exception as e:
        print(f"--- [Background Task] Vectorization failed (ID: {doc_id}): {e}")
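

# A minimal smoke test, kept here as a sketch: the sample values below are
# illustrative assumptions, not part of the original agent, and running it
# requires a valid OPENAI_API_KEY in the environment. In the full app this
# function would instead be scheduled off the request path (the log prefix
# suggests something like FastAPI's BackgroundTasks).
if __name__ == "__main__":
    sample_text = "ChromaDB stores embedded chunks for later retrieval. " * 40
    agent_vectorize_and_store(
        doc_id="demo-001",
        text=sample_text,
        category="demo",
        language="en",
    )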