Backend (516 tests, 94% coverage): - Add azure_openai endpoint/deployment validation tests (config.py -> 100%) - Add _total_conversations and _avg_turns direct tests (queries.py -> 100%) - Add transformer edge cases: list content, string checkpoint, invalid JSON, malformed message graceful skip (transformer.py -> 93%) - Add safety combined status_code+error_message interaction tests - Fix ambiguous 200/422 assertion to strict 422 - Add E2E pagination shape assertions (total, page, per_page, row count) - Fix ReplayPool mock to respect LIMIT/OFFSET params Frontend (23 tests, vitest + happy-dom + @testing-library/react): - Add vitest infrastructure with happy-dom environment - Add api.ts tests: success, HTTP error, success=false, URL encoding - Add DashboardPage tests: loading, data, error, empty states - Add ReplayListPage tests: loading, empty, data, error, status badge classes - Add ReplayPage tests: loading, steps, empty, error states
110 lines
4.0 KiB
Python
110 lines
4.0 KiB
Python
"""Tests for app.config module."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import pytest
|
|
|
|
from app.config import Settings
|
|
|
|
|
|
def _isolated_settings(**kwargs: object) -> Settings:
    """Create a Settings instance that ignores .env files and process env vars.

    pydantic-settings reads from env_file and the environment by default,
    which makes test results depend on the machine they run on.  We build a
    throwaway subclass with env-file loading disabled and temporarily strip
    any process env vars that map onto Settings fields, so every test gets
    deterministic results.

    Args:
        **kwargs: Field values forwarded directly to the Settings constructor.

    Returns:
        A validated Settings instance built only from *kwargs* and defaults.

    Raises:
        Exception: whatever pydantic raises when required fields are missing
            or invalid (typically ``pydantic.ValidationError``).
    """
    import os

    # Build a throwaway subclass that disables env-file and env-var loading.
    class _IsolatedSettings(Settings):
        model_config = Settings.model_config.copy()
        model_config["env_file"] = None  # type: ignore[assignment]
        # env_ignore_empty makes pydantic-settings treat empty-string env
        # vars as absent rather than present, so required fields still raise.
        model_config["env_ignore_empty"] = True

    env_backup = os.environ.copy()
    # Strip all env vars that Settings knows about so they can't leak in.
    # Pydantic field names are lowercase; env vars match case-insensitively.
    settings_fields = set(Settings.model_fields)
    for key in list(os.environ):
        if key.lower() in settings_fields:
            del os.environ[key]
    try:
        return _IsolatedSettings(**kwargs)  # type: ignore[return-value]
    finally:
        # Always restore the caller's environment, even on validation errors.
        os.environ.clear()
        os.environ.update(env_backup)
|
@pytest.mark.unit
class TestSettings:
    """Validation and default-value behavior of the Settings model."""

    # Obviously-fake connection string shared by every test case below.
    _DB_URL = "postgresql://x:x@localhost/db"

    def test_default_values(self) -> None:
        cfg = _isolated_settings(
            database_url=self._DB_URL,
            anthropic_api_key="key",
        )
        assert cfg.llm_provider == "anthropic"
        assert cfg.llm_model == "claude-sonnet-4-6"
        assert cfg.session_ttl_minutes == 30
        assert cfg.interrupt_ttl_minutes == 30

    def test_custom_values(self) -> None:
        cfg = _isolated_settings(
            database_url=self._DB_URL,
            llm_provider="openai",
            llm_model="gpt-4o",
            session_ttl_minutes=15,
            openai_api_key="sk-test",
        )
        assert cfg.llm_provider == "openai"
        assert cfg.llm_model == "gpt-4o"
        assert cfg.session_ttl_minutes == 15

    def test_invalid_provider_rejected(self) -> None:
        # An unknown provider name must fail model validation.
        with pytest.raises(Exception):
            _isolated_settings(
                database_url=self._DB_URL,
                llm_provider="invalid",
            )

    def test_missing_database_url_rejected(self) -> None:
        # database_url is required; omitting it must raise.
        with pytest.raises(Exception):
            _isolated_settings(anthropic_api_key="key")

    def test_empty_api_key_for_provider_rejected(self) -> None:
        # An empty key for the selected provider is treated as missing.
        with pytest.raises(ValueError, match="API key"):
            _isolated_settings(
                database_url=self._DB_URL,
                llm_provider="anthropic",
                anthropic_api_key="",
            )

    def test_wrong_provider_key_rejected(self) -> None:
        # Supplying a key for a *different* provider does not satisfy
        # the selected provider's key requirement.
        with pytest.raises(ValueError, match="API key"):
            _isolated_settings(
                database_url=self._DB_URL,
                llm_provider="openai",
                anthropic_api_key="key",
                openai_api_key="",
            )

    def test_azure_openai_missing_endpoint_rejected(self) -> None:
        # azure_openai requires an endpoint in addition to the key.
        with pytest.raises(ValueError, match="AZURE_OPENAI_ENDPOINT"):
            _isolated_settings(
                database_url=self._DB_URL,
                llm_provider="azure_openai",
                azure_openai_api_key="key",
                azure_openai_deployment="my-deploy",
            )

    def test_azure_openai_missing_deployment_rejected(self) -> None:
        # azure_openai also requires a deployment name.
        with pytest.raises(ValueError, match="AZURE_OPENAI_DEPLOYMENT"):
            _isolated_settings(
                database_url=self._DB_URL,
                llm_provider="azure_openai",
                azure_openai_api_key="key",
                azure_openai_endpoint="https://example.openai.azure.com",
            )
|