test: close coverage gaps and add frontend test infrastructure

Backend (516 tests, 94% coverage):
- Add azure_openai endpoint/deployment validation tests (config.py -> 100%)
- Add _total_conversations and _avg_turns direct tests (queries.py -> 100%)
- Add transformer edge cases: list content, string checkpoint, invalid JSON,
  malformed message graceful skip (transformer.py -> 93%)
- Add safety tests for combined status_code + error_message classification
- Fix ambiguous 200/422 assertion to strict 422
- Add E2E pagination shape assertions (total, page, per_page, row count)
- Fix ReplayPool mock to respect LIMIT/OFFSET params

Frontend (23 tests, vitest + happy-dom + @testing-library/react):
- Add vitest infrastructure with happy-dom environment
- Add api.ts tests: success, HTTP error, success=false, URL encoding
- Add DashboardPage tests: loading, data, error, empty states
- Add ReplayListPage tests: loading, empty, data, error, status badge classes
- Add ReplayPage tests: loading, steps, empty, error states
Author: Yaojia Wang
Date: 2026-04-06 13:32:10 +02:00
parent 036e12349d
commit 19fc9f3289

15 changed files with 1270 additions and 7 deletions


@@ -47,7 +47,13 @@ class ReplayPool(FakePool):
         if "COUNT" in query and "conversations" in query:
             return FakeCursor([(len(self._convos),)])
         if "conversations" in query and "SELECT" in query:
-            return FakeCursor(self._convos)
+            # Respect LIMIT/OFFSET from params if provided
+            rows = self._convos
+            if params:
+                offset = params.get("offset", 0)
+                limit = params.get("limit", len(rows))
+                rows = rows[offset : offset + limit]
+            return FakeCursor(rows)
         if "checkpoints" in query:
             return FakeCursor(self._checkpoints)
         # Analytics queries
@@ -122,6 +128,11 @@ class TestFlow6ReplayConversation:
         assert resp.status_code == 200
         body = resp.json()
         assert body["success"] is True
+        data = body["data"]
+        assert data["total"] == 5
+        assert data["page"] == 1
+        assert data["per_page"] == 2
+        assert len(data["conversations"]) == 2

     def test_replay_thread_not_found(self) -> None:
         pool = ReplayPool(checkpoints=[])
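
With five seeded conversations and per_page=2, page 1 slices rows[0:2], which is what the shape assertions above pin down. A minimal standalone sketch of the slicing the mock now performs (names are illustrative; the mock's real entry point is not part of this hunk):

    rows = ["c1", "c2", "c3", "c4", "c5"]
    params = {"limit": 2, "offset": 0}  # page 1, per_page=2
    offset = params.get("offset", 0)
    limit = params.get("limit", len(rows))
    page = rows[offset : offset + limit]
    assert page == ["c1", "c2"]  # total stays 5; only the returned page is truncated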


@@ -158,6 +158,42 @@ class TestInterruptStatsQuery:
         assert result.expired == 0


+class TestTotalConversations:
+    @pytest.mark.asyncio
+    async def test_returns_count(self) -> None:
+        from app.analytics.queries import _total_conversations
+
+        pool = _make_pool_with_fetchone({"total": 42})
+        result = await _total_conversations(pool, range_days=7)
+        assert result == 42
+
+    @pytest.mark.asyncio
+    async def test_zero_state_returns_zero(self) -> None:
+        from app.analytics.queries import _total_conversations
+
+        pool = _make_pool_with_fetchone(None)
+        result = await _total_conversations(pool, range_days=7)
+        assert result == 0
+
+
+class TestAvgTurns:
+    @pytest.mark.asyncio
+    async def test_returns_float(self) -> None:
+        from app.analytics.queries import _avg_turns
+
+        pool = _make_pool_with_fetchone({"avg_turns": 3.5})
+        result = await _avg_turns(pool, range_days=7)
+        assert result == 3.5
+
+    @pytest.mark.asyncio
+    async def test_zero_state_returns_zero(self) -> None:
+        from app.analytics.queries import _avg_turns
+
+        pool = _make_pool_with_fetchone(None)
+        result = await _avg_turns(pool, range_days=7)
+        assert result == 0.0
+
+
 class TestGetAnalytics:
     @pytest.mark.asyncio
     async def test_returns_analytics_result(self) -> None:
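
The zero-state tests pin down None-row coalescing. A sketch of that behavior in isolation (illustrative only; the real queries live in app.analytics.queries and their SQL is not shown here):

    def _coalesce_total(row):
        # fetchone() returns None when the table is empty; treat that as 0.
        return row["total"] if row else 0

    assert _coalesce_total({"total": 42}) == 42
    assert _coalesce_total(None) == 0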


@@ -119,8 +119,8 @@ class TestListConversations:
         with TestClient(app) as client:
             resp = client.get("/api/conversations?per_page=200")
-            # FastAPI validation rejects values > 100
-            assert resp.status_code in (200, 422)
+            # FastAPI Query(le=100) rejects values > 100
+            assert resp.status_code == 422


 class TestGetReplay:

@@ -153,3 +153,105 @@ class TestTransformCheckpoints:
         rows = [_make_row([{"type": "human", "content": "Hi"}])]
         steps = transform_checkpoints(rows)
         assert isinstance(steps[0].timestamp, str)
+
+    def test_list_content_joined_to_string(self) -> None:
+        from app.replay.transformer import transform_checkpoints
+
+        rows = [
+            _make_row(
+                [
+                    {
+                        "type": "human",
+                        "content": [
+                            {"text": "Hello"},
+                            {"text": " world"},
+                        ],
+                    }
+                ]
+            )
+        ]
+        steps = transform_checkpoints(rows)
+        assert len(steps) == 1
+        assert steps[0].content == "Hello world"
+
+    def test_checkpoint_as_string_skipped(self) -> None:
+        from app.replay.transformer import transform_checkpoints
+
+        rows = [
+            {
+                "thread_id": "t1",
+                "checkpoint_id": "cp1",
+                "checkpoint": "not-a-dict",
+                "metadata": {},
+            }
+        ]
+        steps = transform_checkpoints(rows)
+        assert steps == []
+
+    def test_channel_values_not_dict_skipped(self) -> None:
+        from app.replay.transformer import transform_checkpoints
+
+        rows = [
+            {
+                "thread_id": "t1",
+                "checkpoint_id": "cp1",
+                "checkpoint": {"channel_values": "bad"},
+                "metadata": {},
+            }
+        ]
+        steps = transform_checkpoints(rows)
+        assert steps == []
+
+    def test_tool_result_valid_json_parsed(self) -> None:
+        from app.replay.transformer import transform_checkpoints
+
+        rows = [
+            _make_row(
+                [
+                    {
+                        "type": "tool",
+                        "content": '{"order_id": "123", "status": "shipped"}',
+                        "name": "get_order_status",
+                    }
+                ]
+            )
+        ]
+        steps = transform_checkpoints(rows)
+        assert len(steps) == 1
+        assert steps[0].result == {"order_id": "123", "status": "shipped"}
+
+    def test_tool_result_invalid_json_wrapped(self) -> None:
+        from app.replay.transformer import transform_checkpoints
+
+        rows = [
+            _make_row(
+                [
+                    {
+                        "type": "tool",
+                        "content": "not valid json",
+                        "name": "some_tool",
+                    }
+                ]
+            )
+        ]
+        steps = transform_checkpoints(rows)
+        assert len(steps) == 1
+        assert steps[0].result == {"raw": "not valid json"}
+
+    def test_malformed_message_skipped_gracefully(self) -> None:
+        from app.replay.transformer import transform_checkpoints
+
+        rows = [
+            _make_row(
+                [
+                    {"type": "human", "content": "Good message"},
+                    42,  # not a dict -- will raise in _step_from_message
+                    {"type": "ai", "content": "Response", "tool_calls": []},
+                ]
+            )
+        ]
+        steps = transform_checkpoints(rows)
+        # The malformed message is skipped; the other two produce steps.
+        assert len(steps) == 2
+        assert steps[0].step == 1
+        assert steps[1].step == 2
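
Taken together, these tests pin down three transformer behaviors: list content is flattened to one string, tool results that are not valid JSON are wrapped instead of raising, and a message that blows up mid-checkpoint is skipped without losing its neighbors. A standalone sketch of the first two (illustrative; the real logic in app.replay.transformer may differ in detail):

    import json

    def coerce_content(content):
        # List-form content is joined from each part's "text" field.
        if isinstance(content, list):
            return "".join(part.get("text", "") for part in content)
        return content

    def parse_tool_result(content):
        # Invalid JSON is wrapped rather than raised.
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            return {"raw": content}

    assert coerce_content([{"text": "Hello"}, {"text": " world"}]) == "Hello world"
    assert parse_tool_result("not valid json") == {"raw": "not valid json"}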


@@ -89,3 +89,21 @@ class TestSettings:
                 anthropic_api_key="key",
                 openai_api_key="",
             )
+
+    def test_azure_openai_missing_endpoint_rejected(self) -> None:
+        with pytest.raises(ValueError, match="AZURE_OPENAI_ENDPOINT"):
+            _isolated_settings(
+                database_url="postgresql://x:x@localhost/db",
+                llm_provider="azure_openai",
+                azure_openai_api_key="key",
+                azure_openai_deployment="my-deploy",
+            )
+
+    def test_azure_openai_missing_deployment_rejected(self) -> None:
+        with pytest.raises(ValueError, match="AZURE_OPENAI_DEPLOYMENT"):
+            _isolated_settings(
+                database_url="postgresql://x:x@localhost/db",
+                llm_provider="azure_openai",
+                azure_openai_api_key="key",
+                azure_openai_endpoint="https://example.openai.azure.com",
+            )
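
Both tests expect a ValueError naming the missing variable. A sketch of the cross-field check they exercise (function and message wording are assumptions; field names follow the test kwargs, and the real validator in app.config is not shown):

    def check_azure_openai(llm_provider, azure_openai_endpoint, azure_openai_deployment):
        if llm_provider != "azure_openai":
            return
        if not azure_openai_endpoint:
            raise ValueError("AZURE_OPENAI_ENDPOINT is required when LLM_PROVIDER=azure_openai")
        if not azure_openai_deployment:
            raise ValueError("AZURE_OPENAI_DEPLOYMENT is required when LLM_PROVIDER=azure_openai")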


@@ -67,6 +67,17 @@ class TestClassifyMcpError:
     def test_unknown_message(self) -> None:
         assert classify_mcp_error(error_message="Something happened") == "unknown"
+
+    def test_status_code_takes_precedence_over_message(self) -> None:
+        # 429 is transient by code; message would classify as validation
+        assert classify_mcp_error(status_code=429, error_message="invalid param") == "transient"
+
+    def test_non_classified_status_falls_through_to_message(self) -> None:
+        # 200 is not in any status set, so message classification takes over
+        assert classify_mcp_error(status_code=200, error_message="timed out") == "transient"
+
+    def test_no_args_returns_unknown(self) -> None:
+        assert classify_mcp_error() == "unknown"


 class TestRetryPolicy:
     def test_transient_is_retryable(self) -> None:
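
The precedence the new tests encode: a recognized status code wins outright, an unrecognized one (like 200) falls through to message matching, and with nothing to go on the result is "unknown". A sketch under assumed status and keyword tables (the real tables live alongside classify_mcp_error):

    TRANSIENT_STATUSES = {408, 429, 502, 503, 504}   # assumption
    TRANSIENT_KEYWORDS = ("timed out", "timeout")    # assumption
    VALIDATION_KEYWORDS = ("invalid",)               # assumption

    def classify(status_code=None, error_message=None):
        # Status-code classification takes precedence over message matching.
        if status_code in TRANSIENT_STATUSES:
            return "transient"
        msg = (error_message or "").lower()
        if any(k in msg for k in TRANSIENT_KEYWORDS):
            return "transient"
        if any(k in msg for k in VALIDATION_KEYWORDS):
            return "validation"
        return "unknown"

    assert classify(status_code=429, error_message="invalid param") == "transient"
    assert classify(status_code=200, error_message="timed out") == "transient"
    assert classify() == "unknown"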