Backend (516 tests, 94% coverage): - Add azure_openai endpoint/deployment validation tests (config.py -> 100%) - Add _total_conversations and _avg_turns direct tests (queries.py -> 100%) - Add transformer edge cases: list content, string checkpoint, invalid JSON, malformed message graceful skip (transformer.py -> 93%) - Add safety combined status_code+error_message interaction tests - Fix ambiguous 200/422 assertion to strict 422 - Add E2E pagination shape assertions (total, page, per_page, row count) - Fix ReplayPool mock to respect LIMIT/OFFSET params Frontend (23 tests, vitest + happy-dom + @testing-library/react): - Add vitest infrastructure with happy-dom environment - Add api.ts tests: success, HTTP error, success=false, URL encoding - Add DashboardPage tests: loading, data, error, empty states - Add ReplayListPage tests: loading, empty, data, error, status badge classes - Add ReplayPage tests: loading, steps, empty, error states
86 lines
2.6 KiB
TypeScript
import { describe, it, expect, vi, beforeEach } from "vitest";
|
|
import { render, screen, waitFor } from "@testing-library/react";
|
|
import { MemoryRouter, Route, Routes } from "react-router-dom";
|
|
import { ReplayPage } from "./ReplayPage";
|
|
|
|
// Replace the real API module so each test fully controls fetchReplay's
// resolution (vi.mock factories are hoisted above the imports below).
vi.mock("../api", () => {
  return { fetchReplay: vi.fn() };
});
// Stub the timeline so these tests verify page wiring only; the stub
// reports how many steps it received via its rendered text.
vi.mock("../components/ReplayTimeline", () => {
  const ReplayTimeline = ({ steps }: { steps: unknown[] }) => (
    <div data-testid="replay-timeline">{steps.length} steps</div>
  );
  return { ReplayTimeline };
});
// Imported after vi.mock so this resolves to the mocked module;
// vi.mocked() just layers the mock typings on top.
import { fetchReplay } from "../api";

const mockFetchReplay = vi.mocked(fetchReplay);

// Drop recorded calls and stubbed implementations between tests.
beforeEach(() => mockFetchReplay.mockReset());
/**
 * Mounts ReplayPage at /replay/:threadId inside a MemoryRouter so the
 * component can read the thread id from the route params.
 */
function renderWithRoute(threadId: string) {
  const ui = (
    <MemoryRouter initialEntries={[`/replay/${threadId}`]}>
      <Routes>
        <Route path="/replay/:threadId" element={<ReplayPage />} />
      </Routes>
    </MemoryRouter>
  );
  return render(ui);
}
describe("ReplayPage", () => {
  it("renders loading state initially", () => {
    // A promise that never settles pins the page in its loading state.
    mockFetchReplay.mockReturnValue(new Promise(() => {}));
    renderWithRoute("t1");
    expect(document.querySelector(".skeleton-box")).toBeTruthy();
  });

  it("renders replay steps on success", async () => {
    const steps = [
      { step: 1, type: "message", content: "Hello", agent: null, tool: null, params: null, result: null, timestamp: "2026-04-01T00:00:00Z" },
      { step: 2, type: "response", content: "Hi!", agent: "bot", tool: null, params: null, result: null, timestamp: "2026-04-01T00:00:01Z" },
    ];
    mockFetchReplay.mockResolvedValue({
      thread_id: "t1",
      total_steps: steps.length,
      page: 1,
      per_page: 100,
      steps,
    });
    renderWithRoute("t1");

    // The stubbed timeline appears once the fetch resolves.
    await waitFor(() => {
      expect(screen.getByTestId("replay-timeline")).toBeInTheDocument();
    });
    // Stub renders "<n> steps" for the steps it was handed.
    expect(screen.getByText("2 steps")).toBeInTheDocument();
    // Thread ID appears in multiple places (header + sidebar)
    expect(screen.getAllByText("t1").length).toBeGreaterThan(0);
  });

  it("renders empty state when no steps", async () => {
    mockFetchReplay.mockResolvedValue({
      thread_id: "t1",
      total_steps: 0,
      page: 1,
      per_page: 100,
      steps: [],
    });
    renderWithRoute("t1");

    // findByText retries until the async fetch settles, like waitFor+getByText.
    expect(await screen.findByText("No replay steps found")).toBeInTheDocument();
  });

  it("renders error state on fetch failure", async () => {
    mockFetchReplay.mockRejectedValue(new Error("Not found"));
    renderWithRoute("t1");

    expect(await screen.findByText("Failed to load replay")).toBeInTheDocument();
    // The error message itself is surfaced alongside the generic banner.
    expect(screen.getByText("Not found")).toBeInTheDocument();
  });
});