fix: restore green builds and align frontend-backend contracts (P0)

- Isolate Settings tests from .env and process env leakage
- Fix analytics metadata test to unwrap psycopg Json wrapper
- Remove unused state variables causing frontend build failures
- Fix ReviewPage to use /classifications endpoint instead of nonexistent /result
- Normalize ReviewPage status enums (use `failed`, not `error`) and access_type values
- Align api.ts types with backend response shapes (ReplayPage, AnalyticsData, AgentUsage)
This commit is contained in:
Yaojia Wang
2026-04-05 23:00:39 +02:00
parent 189a0fad34
commit e55ec42ae5
6 changed files with 70 additions and 41 deletions

View File

@@ -145,4 +145,11 @@ class TestPostgresAnalyticsRecorder:
)
call_args = mock_conn.execute.call_args
params = call_args[0][1]
assert params["metadata"] == {"key": "val"}
# PostgresAnalyticsRecorder wraps metadata with psycopg Json() adapter.
# Unwrap to compare the inner dict.
from psycopg.types.json import Json
meta = params["metadata"]
if isinstance(meta, Json):
meta = meta.obj
assert meta == {"key": "val"}

View File

@@ -7,10 +7,41 @@ import pytest
from app.config import Settings
def _isolated_settings(**kwargs: object) -> Settings:
    """Create a Settings instance that ignores .env files and process env vars.

    pydantic-settings reads from ``env_file`` and the process environment by
    default, which makes test results depend on the machine they run on. We
    build a throwaway Settings subclass with .env loading disabled and
    temporarily strip matching environment variables so that every test gets
    deterministic results.

    Args:
        **kwargs: Field values forwarded verbatim to the Settings constructor.

    Returns:
        A Settings instance populated only from ``kwargs`` and field defaults.

    Raises:
        Exception: whatever validation error pydantic raises for missing or
            invalid fields (required fields are no longer satisfied by env
            vars leaking in from the host).
    """
    import os

    # Throwaway subclass: copy model_config so the real Settings class is not
    # mutated, then disable .env-file loading for this subclass only.
    class _IsolatedSettings(Settings):
        model_config = Settings.model_config.copy()
        model_config["env_file"] = None  # type: ignore[assignment]
        model_config["env_ignore_empty"] = True

    # Temporarily remove every env var whose lowered name is a Settings field
    # so host values cannot leak into construction.
    # NOTE(review): assumes pydantic-settings' default case-insensitive env
    # matching and no env_prefix/alias overrides — confirm against Settings.
    env_backup = os.environ.copy()
    settings_fields = set(Settings.model_fields)
    for key in list(os.environ):
        if key.lower() in settings_fields:
            del os.environ[key]
    try:
        return _IsolatedSettings(**kwargs)  # type: ignore[return-value]
    finally:
        # Always restore the caller's environment, even if validation raised.
        os.environ.clear()
        os.environ.update(env_backup)
@pytest.mark.unit
class TestSettings:
def test_default_values(self) -> None:
settings = Settings(
settings = _isolated_settings(
database_url="postgresql://x:x@localhost/db",
anthropic_api_key="key",
)
@@ -20,7 +51,7 @@ class TestSettings:
assert settings.interrupt_ttl_minutes == 30
def test_custom_values(self) -> None:
settings = Settings(
settings = _isolated_settings(
database_url="postgresql://x:x@localhost/db",
llm_provider="openai",
llm_model="gpt-4o",
@@ -33,18 +64,18 @@ class TestSettings:
def test_invalid_provider_rejected(self) -> None:
with pytest.raises(Exception):
Settings(
_isolated_settings(
database_url="postgresql://x:x@localhost/db",
llm_provider="invalid",
)
def test_missing_database_url_rejected(self) -> None:
with pytest.raises(Exception):
Settings(anthropic_api_key="key")
_isolated_settings(anthropic_api_key="key")
def test_empty_api_key_for_provider_rejected(self) -> None:
with pytest.raises(ValueError, match="API key"):
Settings(
_isolated_settings(
database_url="postgresql://x:x@localhost/db",
llm_provider="anthropic",
anthropic_api_key="",
@@ -52,7 +83,7 @@ class TestSettings:
def test_wrong_provider_key_rejected(self) -> None:
with pytest.raises(ValueError, match="API key"):
Settings(
_isolated_settings(
database_url="postgresql://x:x@localhost/db",
llm_provider="openai",
anthropic_api_key="key",

View File

@@ -39,17 +39,16 @@ export interface ReplayStep {
export interface ReplayPage {
thread_id: string;
steps: ReplayStep[];
total: number;
total_steps: number;
page: number;
per_page: number;
steps: ReplayStep[];
}
export interface AgentUsage {
agent_name: string;
message_count: number;
total_tokens: number;
total_cost_usd: number;
agent: string;
count: number;
percentage: number;
}
export interface InterruptStats {
@@ -60,14 +59,12 @@ export interface InterruptStats {
}
export interface AnalyticsData {
range: string;
total_conversations: number;
resolved_conversations: number;
escalated_conversations: number;
resolution_rate: number;
escalation_rate: number;
total_tokens: number;
total_cost_usd: number;
avg_turns_per_conversation: number;
avg_cost_per_conversation_usd: number;
agent_usage: AgentUsage[];
interrupt_stats: InterruptStats;
}

View File

@@ -1,4 +1,3 @@
import { useState } from "react";
import { useParams, useNavigate } from "react-router-dom";
import { ReplayTimeline } from "../components/ReplayTimeline";
@@ -16,8 +15,6 @@ const MOCK_STEPS = [
export function ReplayPage() {
const { threadId } = useParams<{ threadId: string }>();
const navigate = useNavigate();
const [page, setPage] = useState(1);
if (!threadId) return null;
return (

View File

@@ -2,8 +2,8 @@ import { useEffect, useRef, useState } from "react";
interface ImportJob {
job_id: string;
status: "pending" | "processing" | "done" | "error";
error?: string;
status: "pending" | "processing" | "done" | "failed";
error_message?: string;
}
interface EndpointClassification {
@@ -14,16 +14,9 @@ interface EndpointClassification {
agent_group: string;
}
interface JobResult {
job_id: string;
status: string;
endpoints: EndpointClassification[];
}
export function ReviewPage() {
const [url, setUrl] = useState("");
const [job, setJob] = useState<ImportJob | null>(null);
const [result, setResult] = useState<JobResult | null>(null);
const [submitting, setSubmitting] = useState(false);
const [submitError, setSubmitError] = useState<string | null>(null);
const [classifications, setClassifications] = useState<EndpointClassification[]>([
@@ -45,7 +38,7 @@ export function ReviewPage() {
path: "/api/v1/payments/{charge_id}/refund",
method: "post",
summary: "Issue a full or partial refund for a charge",
access_type: "admin",
access_type: "write",
agent_group: "Billing Assistant",
},
{
@@ -78,14 +71,20 @@ export function ReviewPage() {
const j: ImportJob = data.data ?? data;
setJob(j);
if (j.status === "done") {
return fetch(`/api/openapi/jobs/${encodeURIComponent(jobId)}/result`)
return fetch(`/api/openapi/jobs/${encodeURIComponent(jobId)}/classifications`)
.then((r) => r.json())
.then((rdata) => {
const res: JobResult = rdata.data ?? rdata;
setResult(res);
setClassifications(res.endpoints ?? []);
.then((clfs: EndpointClassification[]) => {
setClassifications(
clfs.map((c: any) => ({
path: c.endpoint?.path ?? c.path ?? "",
method: c.endpoint?.method ?? c.method ?? "",
summary: c.endpoint?.summary ?? c.summary ?? "",
access_type: c.access_type ?? "read",
agent_group: c.agent_group ?? "Unassigned",
}))
);
});
} else if (j.status === "error") {
} else if (j.status === "failed") {
return;
} else {
pollRef.current = setTimeout(() => pollJob(jobId), 2000);
@@ -102,7 +101,6 @@ export function ReviewPage() {
setSubmitting(true);
setSubmitError(null);
setJob(null);
setResult(null);
setClassifications([]);
fetch("/api/openapi/import", {
@@ -174,10 +172,10 @@ export function ReviewPage() {
{job && (
<div style={{ padding: "1rem", background: "var(--bg-surface)", border: "1px solid var(--border-light)", borderRadius: "var(--radius-md)", marginBottom: "1.5rem" }}>
<strong>Job:</strong> {job.job_id} &mdash; Status:{" "}
<span style={{ fontWeight: 600, color: job.status === "done" ? "#10b981" : job.status === "error" ? "var(--brand-accent)" : "#f59e0b" }}>
<span style={{ fontWeight: 600, color: job.status === "done" ? "#10b981" : job.status === "failed" ? "var(--brand-accent)" : "#f59e0b" }}>
{job.status}
</span>
{job.error && <div style={{ marginTop: "4px", color: "var(--brand-accent)" }}>{job.error}</div>}
{job.error_message && <div style={{ marginTop: "4px", color: "var(--brand-accent)" }}>{job.error_message}</div>}
</div>
)}
@@ -221,7 +219,6 @@ export function ReviewPage() {
>
<option value="read">Read Only</option>
<option value="write">Write (Confirm)</option>
<option value="admin">Admin</option>
</select>
<input
type="text"

View File

@@ -1 +1 @@
{"root":["./src/app.tsx","./src/api.ts","./src/main.tsx","./src/types.ts","./src/components/agentaction.tsx","./src/components/chatinput.tsx","./src/components/chatmessages.tsx","./src/components/errorbanner.tsx","./src/components/interruptprompt.tsx","./src/components/layout.tsx","./src/components/metriccard.tsx","./src/components/navbar.tsx","./src/components/replaytimeline.tsx","./src/hooks/usewebsocket.ts","./src/pages/chatpage.tsx","./src/pages/dashboardpage.tsx","./src/pages/replaylistpage.tsx","./src/pages/replaypage.tsx","./src/pages/reviewpage.tsx"],"version":"5.7.3"}
{"root":["./src/app.tsx","./src/api.ts","./src/main.tsx","./src/types.ts","./src/vite-env.d.ts","./src/components/agentaction.tsx","./src/components/chatinput.tsx","./src/components/chatmessages.tsx","./src/components/errorbanner.tsx","./src/components/interruptprompt.tsx","./src/components/layout.tsx","./src/components/metriccard.tsx","./src/components/navbar.tsx","./src/components/replaytimeline.tsx","./src/hooks/usewebsocket.ts","./src/pages/chatpage.tsx","./src/pages/dashboardpage.tsx","./src/pages/replaylistpage.tsx","./src/pages/replaypage.tsx","./src/pages/reviewpage.tsx"],"version":"5.7.3"}