Backend:
- FastAPI WebSocket /ws endpoint with streaming via LangGraph astream
- LangGraph Supervisor connecting 3 mock agents (order_lookup, order_actions, fallback)
- YAML Agent Registry with Pydantic validation and immutable configs
- PostgresSaver checkpoint persistence via langgraph-checkpoint-postgres
- Session TTL with 30-min sliding window and interrupt extension
- LLM provider abstraction (Anthropic/OpenAI/Google)
- Token usage + cost tracking callback handler
- Input validation: message size cap, thread_id format, content length
- Security: no hardcoded defaults, startup API key validation, no input reflection

Frontend:
- React 19 + TypeScript + Vite chat UI
- WebSocket hook with reconnect + exponential backoff
- Streaming token display with agent attribution
- Interrupt approval/reject UI for write operations
- Collapsible tool call viewer

Testing:
- 87 unit tests, 87% coverage (exceeds 80% requirement)
- Ruff lint + format clean

Infrastructure:
- Docker Compose (PostgreSQL 16 + backend)
- pyproject.toml with full dependency management
42 lines · 1.2 KiB · Python
"""Tests for app.llm module."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import pytest
|
|
|
|
from app.config import Settings
|
|
from app.llm import create_llm
|
|
|
|
|
|
@pytest.mark.unit
class TestCreateLlm:
    """Verify create_llm returns the chat-model class for each configured provider."""

    @staticmethod
    def _settings(**overrides: str) -> Settings:
        """Build a Settings with a throwaway database URL plus provider overrides."""
        return Settings(database_url="postgresql://x:x@localhost/db", **overrides)

    def test_anthropic_provider(self) -> None:
        cfg = self._settings(
            llm_provider="anthropic",
            llm_model="claude-sonnet-4-6",
            anthropic_api_key="test-key",
        )
        # Compare by class name to avoid importing the provider package here.
        assert type(create_llm(cfg)).__name__ == "ChatAnthropic"

    def test_openai_provider(self) -> None:
        cfg = self._settings(
            llm_provider="openai",
            llm_model="gpt-4o",
            openai_api_key="sk-test",
        )
        assert type(create_llm(cfg)).__name__ == "ChatOpenAI"

    def test_google_provider(self) -> None:
        cfg = self._settings(
            llm_provider="google",
            llm_model="gemini-pro",
            google_api_key="test-key",
        )
        assert type(create_llm(cfg)).__name__ == "ChatGoogleGenerativeAI"