- Replay models: StepType enum, ReplayStep, ReplayPage frozen dataclasses
- Checkpoint transformer: PostgresSaver JSONB -> structured timeline steps
- Replay API: GET /api/conversations (paginated), GET /api/replay/{thread_id}
- Analytics models: AgentUsage, InterruptStats, AnalyticsResult
- Analytics event recorder: Protocol + PostgresAnalyticsRecorder + NoOp
- Analytics queries: resolution_rate, agent_usage, escalation_rate, cost, interrupts
- Analytics API: GET /api/analytics?range=Xd with envelope response
- DB migration: analytics_events table + conversations column additions
- 74 new tests, 399 total passing, 92.87% coverage
107 lines
3.6 KiB
Python
"""Unit tests for app.analytics.models."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import pytest
|
|
|
|
pytestmark = pytest.mark.unit
|
|
|
|
|
|
class TestAgentUsage:
    """Behavioral checks for the frozen AgentUsage dataclass."""

    def test_agent_usage_construction(self) -> None:
        """All three fields round-trip through the constructor unchanged."""
        from app.analytics.models import AgentUsage

        usage = AgentUsage(agent="order_agent", count=10, percentage=50.0)
        assert (usage.agent, usage.count, usage.percentage) == (
            "order_agent",
            10,
            50.0,
        )

    def test_agent_usage_is_frozen(self) -> None:
        """Assigning to a field after construction must raise (frozen dataclass)."""
        from app.analytics.models import AgentUsage

        usage = AgentUsage(agent="a", count=1, percentage=100.0)
        with pytest.raises((AttributeError, TypeError)):
            usage.count = 2  # type: ignore[misc]
|
|
|
|
|
|
class TestInterruptStats:
    """Behavioral checks for the frozen InterruptStats dataclass."""

    def test_interrupt_stats_defaults(self) -> None:
        """A no-argument construction yields all-zero counters."""
        from app.analytics.models import InterruptStats

        stats = InterruptStats()
        assert (stats.total, stats.approved, stats.rejected, stats.expired) == (
            0,
            0,
            0,
            0,
        )

    def test_interrupt_stats_custom_values(self) -> None:
        """Explicit keyword values are stored on the matching fields."""
        from app.analytics.models import InterruptStats

        stats = InterruptStats(total=10, approved=7, rejected=2, expired=1)
        assert (stats.total, stats.approved, stats.rejected, stats.expired) == (
            10,
            7,
            2,
            1,
        )

    def test_interrupt_stats_is_frozen(self) -> None:
        """Assigning to a field after construction must raise (frozen dataclass)."""
        from app.analytics.models import InterruptStats

        stats = InterruptStats()
        with pytest.raises((AttributeError, TypeError)):
            stats.total = 5  # type: ignore[misc]
|
|
|
|
|
|
class TestAnalyticsResult:
    """Behavioral checks for the frozen AnalyticsResult aggregate dataclass."""

    def test_analytics_result_construction(self) -> None:
        """A fully-populated result exposes every field as constructed."""
        from app.analytics.models import AgentUsage, AnalyticsResult, InterruptStats

        result = AnalyticsResult(
            range="7d",
            total_conversations=100,
            resolution_rate=0.85,
            escalation_rate=0.05,
            avg_turns_per_conversation=4.2,
            avg_cost_per_conversation_usd=0.03,
            agent_usage=(AgentUsage(agent="order_agent", count=60, percentage=60.0),),
            interrupt_stats=InterruptStats(total=5, approved=4, rejected=1, expired=0),
        )

        assert result.range == "7d"
        assert result.total_conversations == 100
        assert result.resolution_rate == 0.85
        assert result.escalation_rate == 0.05
        assert result.avg_turns_per_conversation == 4.2
        assert result.avg_cost_per_conversation_usd == 0.03
        assert len(result.agent_usage) == 1
        assert result.interrupt_stats.total == 5

    def test_analytics_result_is_frozen(self) -> None:
        """Assigning to a field after construction must raise (frozen dataclass)."""
        from app.analytics.models import AnalyticsResult, InterruptStats

        result = AnalyticsResult(
            range="7d",
            total_conversations=0,
            resolution_rate=0.0,
            escalation_rate=0.0,
            avg_turns_per_conversation=0.0,
            avg_cost_per_conversation_usd=0.0,
            agent_usage=(),
            interrupt_stats=InterruptStats(),
        )
        with pytest.raises((AttributeError, TypeError)):
            result.range = "30d"  # type: ignore[misc]

    def test_analytics_result_empty_agent_usage(self) -> None:
        """An empty agent_usage tuple is preserved as-is."""
        from app.analytics.models import AnalyticsResult, InterruptStats

        result = AnalyticsResult(
            range="7d",
            total_conversations=0,
            resolution_rate=0.0,
            escalation_rate=0.0,
            avg_turns_per_conversation=0.0,
            avg_cost_per_conversation_usd=0.0,
            agent_usage=(),
            interrupt_stats=InterruptStats(),
        )
        assert result.agent_usage == ()
|