re-structure

Yaojia Wang
2026-02-01 22:55:31 +01:00
parent 400b12a967
commit b602d0a340
176 changed files with 856 additions and 853 deletions

BIN .coverage
Binary file not shown.

View File

@@ -324,7 +324,7 @@ Inference API PostgreSQL Training (ACI)
## Python API
```python
-from inference.pipeline import InferencePipeline
+from backend.pipeline import InferencePipeline
# Initialize
pipeline = InferencePipeline(
@@ -350,8 +350,8 @@ if result.cross_validation:
```
```python
-from inference.pipeline.payment_line_parser import PaymentLineParser
-from inference.pipeline.customer_number_parser import CustomerNumberParser
+from backend.pipeline.payment_line_parser import PaymentLineParser
+from backend.pipeline.customer_number_parser import CustomerNumberParser
# Payment Line parsing
parser = PaymentLineParser()

View File

@@ -347,7 +347,7 @@ from sagemaker.pytorch import PyTorchModel
model = PyTorchModel(
model_data="s3://invoice-models/model.tar.gz",
role="arn:aws:iam::123456789012:role/SageMakerRole",
entry_point="inference.py",
entry_point="backend.py",
framework_version="2.0",
py_version="py310"
)
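For context, a `PyTorchModel` defined this way is usually deployed to an endpoint afterwards. A minimal sketch, not part of this commit; the instance settings below are assumptions:
```python
# Sketch: deploy the model defined above to a real-time endpoint.
# initial_instance_count and instance_type are illustrative assumptions.
predictor = model.deploy(
    initial_instance_count=1,
    instance_type="ml.g4dn.xlarge",
)
```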

View File

@@ -90,7 +90,7 @@ def main():
print(f"Processing {len(pdf_files)} PDF file(s)")
print(f"Model: {model_path}")
-from inference.pipeline import InferencePipeline
+from backend.pipeline import InferencePipeline
# Initialize pipeline
pipeline = InferencePipeline(

View File

@@ -120,7 +120,7 @@ def main() -> None:
logger.info("=" * 60)
# Create config
-from inference.web.config import AppConfig, ModelConfig, ServerConfig, FileConfig
+from backend.web.config import AppConfig, ModelConfig, ServerConfig, FileConfig
config = AppConfig(
model=ModelConfig(
@@ -141,7 +141,7 @@ def main() -> None:
# Create and run app
import uvicorn
-from inference.web.app import create_app
+from backend.web.app import create_app
app = create_app(config)

View File

@@ -12,8 +12,8 @@ from uuid import UUID
from sqlalchemy import func, text
from sqlmodel import Session, select
-from inference.data.database import get_session_context, create_db_and_tables, close_engine
-from inference.data.models import ApiKey, AsyncRequest, RateLimitEvent
+from backend.data.database import get_session_context, create_db_and_tables, close_engine
+from backend.data.models import ApiKey, AsyncRequest, RateLimitEvent
logger = logging.getLogger(__name__)

View File

@@ -264,8 +264,8 @@ def run_migrations() -> None:
def create_db_and_tables() -> None:
"""Create all database tables."""
-from inference.data.models import ApiKey, AsyncRequest, RateLimitEvent # noqa: F401
-from inference.data.admin_models import ( # noqa: F401
+from backend.data.models import ApiKey, AsyncRequest, RateLimitEvent # noqa: F401
+from backend.data.admin_models import ( # noqa: F401
AdminToken,
AdminDocument,
AdminAnnotation,

View File

@@ -0,0 +1,26 @@
"""
Repository Pattern Implementation
Provides domain-specific repository classes to replace the monolithic AdminDB.
Each repository handles a single domain following Single Responsibility Principle.
"""
from backend.data.repositories.base import BaseRepository
from backend.data.repositories.token_repository import TokenRepository
from backend.data.repositories.document_repository import DocumentRepository
from backend.data.repositories.annotation_repository import AnnotationRepository
from backend.data.repositories.training_task_repository import TrainingTaskRepository
from backend.data.repositories.dataset_repository import DatasetRepository
from backend.data.repositories.model_version_repository import ModelVersionRepository
from backend.data.repositories.batch_upload_repository import BatchUploadRepository
__all__ = [
"BaseRepository",
"TokenRepository",
"DocumentRepository",
"AnnotationRepository",
"TrainingTaskRepository",
"DatasetRepository",
"ModelVersionRepository",
"BatchUploadRepository",
]
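The package docstring above references a shared `BaseRepository`, whose body is not shown in this diff. Below is a minimal sketch of what a generic SQLModel-backed base could look like, assuming the `get_session_context` helper imported elsewhere in this commit; everything inside the class is an assumption, not the committed code:
```python
# Hypothetical sketch of backend.data.repositories.base; not the committed code.
from typing import Generic, TypeVar
from uuid import UUID

from sqlmodel import SQLModel, select

from backend.data.database import get_session_context

T = TypeVar("T", bound=SQLModel)


class BaseRepository(Generic[T]):
    """Shared CRUD helpers; each subclass binds one admin domain model."""

    model: type[T]

    def get(self, entity_id: UUID) -> T | None:
        # Look up a single row by primary key.
        with get_session_context() as session:
            return session.get(self.model, entity_id)

    def list_all(self, limit: int = 100, offset: int = 0) -> list[T]:
        # Page through rows of the bound model.
        with get_session_context() as session:
            statement = select(self.model).offset(offset).limit(limit)
            return list(session.exec(statement))
```
Subclasses such as `DocumentRepository(BaseRepository[AdminDocument])`, visible later in this diff, then add domain-specific queries on top.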

View File

@@ -11,9 +11,9 @@ from uuid import UUID
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import AdminAnnotation, AnnotationHistory
-from inference.data.repositories.base import BaseRepository
+from backend.data.database import get_session_context
+from backend.data.admin_models import AdminAnnotation, AnnotationHistory
+from backend.data.repositories.base import BaseRepository
logger = logging.getLogger(__name__)

View File

@@ -13,7 +13,7 @@ from uuid import UUID
from sqlmodel import Session
-from inference.data.database import get_session_context
+from backend.data.database import get_session_context
logger = logging.getLogger(__name__)

View File

@@ -11,9 +11,9 @@ from uuid import UUID
from sqlalchemy import func
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import BatchUpload, BatchUploadFile
-from inference.data.repositories.base import BaseRepository
+from backend.data.database import get_session_context
+from backend.data.admin_models import BatchUpload, BatchUploadFile
+from backend.data.repositories.base import BaseRepository
logger = logging.getLogger(__name__)

View File

@@ -12,9 +12,9 @@ from uuid import UUID
from sqlalchemy import func
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import TrainingDataset, DatasetDocument, TrainingTask
-from inference.data.repositories.base import BaseRepository
+from backend.data.database import get_session_context
+from backend.data.admin_models import TrainingDataset, DatasetDocument, TrainingTask
+from backend.data.repositories.base import BaseRepository
logger = logging.getLogger(__name__)

View File

@@ -12,9 +12,9 @@ from uuid import UUID
from sqlalchemy import func
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import AdminDocument, AdminAnnotation
-from inference.data.repositories.base import BaseRepository
+from backend.data.database import get_session_context
+from backend.data.admin_models import AdminDocument, AdminAnnotation
+from backend.data.repositories.base import BaseRepository
logger = logging.getLogger(__name__)
@@ -333,7 +333,7 @@ class DocumentRepository(BaseRepository[AdminDocument]):
offset: int = 0,
) -> tuple[list[AdminDocument], int]:
"""Get documents suitable for training with filtering."""
-from inference.data.admin_models import TrainingDocumentLink
+from backend.data.admin_models import TrainingDocumentLink
with get_session_context() as session:
statement = select(AdminDocument).where(

View File

@@ -12,9 +12,9 @@ from uuid import UUID
from sqlalchemy import func
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import ModelVersion
-from inference.data.repositories.base import BaseRepository
+from backend.data.database import get_session_context
+from backend.data.admin_models import ModelVersion
+from backend.data.repositories.base import BaseRepository
logger = logging.getLogger(__name__)

View File

@@ -7,8 +7,8 @@ Handles admin token operations following Single Responsibility Principle.
import logging
from datetime import datetime
-from inference.data.admin_models import AdminToken
-from inference.data.repositories.base import BaseRepository
+from backend.data.admin_models import AdminToken
+from backend.data.repositories.base import BaseRepository
logger = logging.getLogger(__name__)

View File

@@ -12,9 +12,9 @@ from uuid import UUID
from sqlalchemy import func
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import TrainingTask, TrainingLog, TrainingDocumentLink
-from inference.data.repositories.base import BaseRepository
+from backend.data.database import get_session_context
+from backend.data.admin_models import TrainingTask, TrainingLog, TrainingDocumentLink
+from backend.data.repositories.base import BaseRepository
logger = logging.getLogger(__name__)

View File

@@ -92,7 +92,7 @@ constructors or methods. The values here serve as sensible defaults
based on Swedish invoice processing requirements.
Example:
-from inference.pipeline.constants import DEFAULT_CONFIDENCE_THRESHOLD
+from backend.pipeline.constants import DEFAULT_CONFIDENCE_THRESHOLD
detector = YOLODetector(
model_path="model.pt",

View File

@@ -0,0 +1,8 @@
"""
Backward compatibility shim for admin_routes.py
DEPRECATED: Import from backend.web.api.v1.admin.documents instead.
"""
from backend.web.api.v1.admin.documents import *
__all__ = ["create_admin_router"]

View File

@@ -0,0 +1,21 @@
"""
Admin API v1
Document management, annotations, and training endpoints.
"""
from backend.web.api.v1.admin.annotations import create_annotation_router
from backend.web.api.v1.admin.augmentation import create_augmentation_router
from backend.web.api.v1.admin.auth import create_auth_router
from backend.web.api.v1.admin.documents import create_documents_router
from backend.web.api.v1.admin.locks import create_locks_router
from backend.web.api.v1.admin.training import create_training_router
__all__ = [
"create_annotation_router",
"create_augmentation_router",
"create_auth_router",
"create_documents_router",
"create_locks_router",
"create_training_router",
]

View File

@@ -13,11 +13,11 @@ from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi.responses import FileResponse, StreamingResponse
from shared.fields import FIELD_CLASSES, FIELD_CLASS_IDS
-from inference.data.repositories import DocumentRepository, AnnotationRepository
-from inference.web.core.auth import AdminTokenDep
-from inference.web.services.autolabel import get_auto_label_service
-from inference.web.services.storage_helpers import get_storage_helper
-from inference.web.schemas.admin import (
+from backend.data.repositories import DocumentRepository, AnnotationRepository
+from backend.web.core.auth import AdminTokenDep
+from backend.web.services.autolabel import get_auto_label_service
+from backend.web.services.storage_helpers import get_storage_helper
+from backend.web.schemas.admin import (
AnnotationCreate,
AnnotationItem,
AnnotationListResponse,
@@ -32,7 +32,7 @@ from inference.web.schemas.admin import (
AutoLabelResponse,
BoundingBox,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
logger = logging.getLogger(__name__)

View File

@@ -2,8 +2,8 @@
from fastapi import APIRouter, Query
-from inference.web.core.auth import AdminTokenDep, DocumentRepoDep, DatasetRepoDep
-from inference.web.schemas.admin.augmentation import (
+from backend.web.core.auth import AdminTokenDep, DocumentRepoDep, DatasetRepoDep
+from backend.web.schemas.admin.augmentation import (
AugmentationBatchRequest,
AugmentationBatchResponse,
AugmentationConfigSchema,
@@ -83,7 +83,7 @@ def register_augmentation_routes(router: APIRouter) -> None:
Returns URLs to original and augmented preview images.
"""
-from inference.web.services.augmentation_service import AugmentationService
+from backend.web.services.augmentation_service import AugmentationService
service = AugmentationService(doc_repo=docs)
return await service.preview_single(
@@ -106,7 +106,7 @@ def register_augmentation_routes(router: APIRouter) -> None:
page: int = Query(default=1, ge=1, description="Page number"),
) -> AugmentationPreviewResponse:
"""Preview complete augmentation pipeline on a document page."""
-from inference.web.services.augmentation_service import AugmentationService
+from backend.web.services.augmentation_service import AugmentationService
service = AugmentationService(doc_repo=docs)
return await service.preview_config(
@@ -132,7 +132,7 @@ def register_augmentation_routes(router: APIRouter) -> None:
This runs as a background task. The augmented images are stored
alongside the original dataset for training.
"""
-from inference.web.services.augmentation_service import AugmentationService
+from backend.web.services.augmentation_service import AugmentationService
service = AugmentationService(doc_repo=docs, dataset_repo=datasets)
return await service.create_augmented_dataset(
@@ -154,7 +154,7 @@ def register_augmentation_routes(router: APIRouter) -> None:
offset: int = Query(default=0, ge=0, description="Offset"),
) -> AugmentedDatasetListResponse:
"""List all augmented datasets."""
-from inference.web.services.augmentation_service import AugmentationService
+from backend.web.services.augmentation_service import AugmentationService
service = AugmentationService(dataset_repo=datasets)
return await service.list_augmented_datasets(limit=limit, offset=offset)

View File

@@ -10,12 +10,12 @@ from datetime import datetime, timedelta, timezone
from fastapi import APIRouter
-from inference.web.core.auth import AdminTokenDep, TokenRepoDep
-from inference.web.schemas.admin import (
+from backend.web.core.auth import AdminTokenDep, TokenRepoDep
+from backend.web.schemas.admin import (
AdminTokenCreate,
AdminTokenResponse,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
logger = logging.getLogger(__name__)

View File

@@ -9,22 +9,22 @@ from typing import Annotated
from fastapi import APIRouter, Depends, Query
-from inference.web.core.auth import (
+from backend.web.core.auth import (
AdminTokenDep,
get_model_version_repository,
get_training_task_repository,
ModelVersionRepoDep,
TrainingTaskRepoDep,
)
-from inference.web.schemas.admin import (
+from backend.web.schemas.admin import (
DashboardStatsResponse,
ActiveModelResponse,
DashboardActiveModelResponse,
ActiveModelInfo,
RunningTrainingInfo,
RecentActivityResponse,
ActivityItem,
)
-from inference.web.services.dashboard_service import (
+from backend.web.services.dashboard_service import (
DashboardStatsService,
DashboardActivityService,
)
@@ -59,7 +59,7 @@ def create_dashboard_router() -> APIRouter:
@router.get(
"/active-model",
-response_model=ActiveModelResponse,
+response_model=DashboardActiveModelResponse,
summary="Get active model info",
description="Returns current active model and running training status.",
)
@@ -67,7 +67,7 @@ def create_dashboard_router() -> APIRouter:
admin_token: AdminTokenDep,
model_repo: ModelVersionRepoDep,
task_repo: TrainingTaskRepoDep,
-) -> ActiveModelResponse:
+) -> DashboardActiveModelResponse:
"""Get active model and training status."""
# Get active model
active_model = model_repo.get_active()
@@ -98,7 +98,7 @@ def create_dashboard_router() -> APIRouter:
progress=running_task.progress or 0,
)
-return ActiveModelResponse(
+return DashboardActiveModelResponse(
model=model_info,
running_training=training_info,
)

View File

@@ -11,15 +11,15 @@ from uuid import UUID
from fastapi import APIRouter, File, HTTPException, Query, UploadFile
-from inference.web.config import DEFAULT_DPI, StorageConfig
-from inference.web.core.auth import (
+from backend.web.config import DEFAULT_DPI, StorageConfig
+from backend.web.core.auth import (
AdminTokenDep,
DocumentRepoDep,
AnnotationRepoDep,
TrainingTaskRepoDep,
)
-from inference.web.services.storage_helpers import get_storage_helper
-from inference.web.schemas.admin import (
+from backend.web.services.storage_helpers import get_storage_helper
+from backend.web.schemas.admin import (
AnnotationItem,
AnnotationSource,
AutoLabelStatus,
@@ -35,7 +35,7 @@ from inference.web.schemas.admin import (
ModelMetrics,
TrainingHistoryItem,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
logger = logging.getLogger(__name__)
@@ -167,8 +167,8 @@ def create_documents_router(storage_config: StorageConfig) -> APIRouter:
raise HTTPException(status_code=500, detail="Failed to save file")
# Update file path in database (using storage path for reference)
-from inference.data.database import get_session_context
-from inference.data.admin_models import AdminDocument
+from backend.data.database import get_session_context
+from backend.data.admin_models import AdminDocument
with get_session_context() as session:
doc = session.get(AdminDocument, UUID(document_id))
if doc:
@@ -586,7 +586,7 @@ def create_documents_router(storage_config: StorageConfig) -> APIRouter:
# If marking as labeled, save annotations to PostgreSQL DocumentDB
db_save_result = None
if status == "labeled":
-from inference.web.services.db_autolabel import save_manual_annotations_to_document_db
+from backend.web.services.db_autolabel import save_manual_annotations_to_document_db
# Get all annotations for this document
doc_annotations = annotations.get_for_document(document_id)

View File

@@ -9,12 +9,12 @@ from uuid import UUID
from fastapi import APIRouter, HTTPException, Query
-from inference.web.core.auth import AdminTokenDep, DocumentRepoDep
-from inference.web.schemas.admin import (
+from backend.web.core.auth import AdminTokenDep, DocumentRepoDep
+from backend.web.schemas.admin import (
AnnotationLockRequest,
AnnotationLockResponse,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
def _validate_uuid(value: str, name: str = "ID") -> None:

View File

@@ -5,7 +5,7 @@ from typing import Annotated
from fastapi import APIRouter, HTTPException, Query
-from inference.web.core.auth import (
+from backend.web.core.auth import (
AdminTokenDep,
DatasetRepoDep,
DocumentRepoDep,
@@ -13,7 +13,7 @@ from inference.web.core.auth import (
ModelVersionRepoDep,
TrainingTaskRepoDep,
)
-from inference.web.schemas.admin import (
+from backend.web.schemas.admin import (
DatasetCreateRequest,
DatasetDetailResponse,
DatasetDocumentItem,
@@ -24,7 +24,7 @@ from inference.web.schemas.admin import (
TrainingStatus,
TrainingTaskResponse,
)
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.services.storage_helpers import get_storage_helper
from ._utils import _validate_uuid
@@ -48,7 +48,7 @@ def register_dataset_routes(router: APIRouter) -> None:
annotations: AnnotationRepoDep,
) -> DatasetResponse:
"""Create a training dataset from document IDs."""
-from inference.web.services.dataset_builder import DatasetBuilder
+from backend.web.services.dataset_builder import DatasetBuilder
# Validate minimum document count for proper train/val/test split
if len(request.document_ids) < 10:

View File

@@ -5,13 +5,13 @@ from typing import Annotated
from fastapi import APIRouter, HTTPException, Query
-from inference.web.core.auth import (
+from backend.web.core.auth import (
AdminTokenDep,
DocumentRepoDep,
AnnotationRepoDep,
TrainingTaskRepoDep,
)
-from inference.web.schemas.admin import (
+from backend.web.schemas.admin import (
ModelMetrics,
TrainingDocumentItem,
TrainingDocumentsResponse,
@@ -19,7 +19,7 @@ from inference.web.schemas.admin import (
TrainingModelsResponse,
TrainingStatus,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
from ._utils import _validate_uuid

View File

@@ -5,12 +5,12 @@ from datetime import datetime
from fastapi import APIRouter, HTTPException
-from inference.web.core.auth import AdminTokenDep, DocumentRepoDep, AnnotationRepoDep
-from inference.web.schemas.admin import (
+from backend.web.core.auth import AdminTokenDep, DocumentRepoDep, AnnotationRepoDep
+from backend.web.schemas.admin import (
ExportRequest,
ExportResponse,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
logger = logging.getLogger(__name__)
@@ -35,7 +35,7 @@ def register_export_routes(router: APIRouter) -> None:
annotations: AnnotationRepoDep,
) -> ExportResponse:
"""Export annotations for training."""
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.services.storage_helpers import get_storage_helper
# Get storage helper for reading images and exports directory
storage = get_storage_helper()

View File

@@ -5,8 +5,8 @@ from typing import Annotated
from fastapi import APIRouter, HTTPException, Query, Request
-from inference.web.core.auth import AdminTokenDep, ModelVersionRepoDep
-from inference.web.schemas.admin import (
+from backend.web.core.auth import AdminTokenDep, ModelVersionRepoDep
+from backend.web.schemas.admin import (
ModelVersionCreateRequest,
ModelVersionUpdateRequest,
ModelVersionItem,

View File

@@ -5,8 +5,8 @@ from typing import Annotated
from fastapi import APIRouter, HTTPException, Query
-from inference.web.core.auth import AdminTokenDep, TrainingTaskRepoDep
-from inference.web.schemas.admin import (
+from backend.web.core.auth import AdminTokenDep, TrainingTaskRepoDep
+from backend.web.schemas.admin import (
TrainingLogItem,
TrainingLogsResponse,
TrainingStatus,
@@ -17,7 +17,7 @@ from inference.web.schemas.admin import (
TrainingTaskResponse,
TrainingType,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
from ._utils import _validate_uuid

View File

@@ -14,10 +14,10 @@ from uuid import UUID
from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, Form
from fastapi.responses import JSONResponse
-from inference.data.repositories import BatchUploadRepository
-from inference.web.core.auth import validate_admin_token
-from inference.web.services.batch_upload import BatchUploadService, MAX_COMPRESSED_SIZE, MAX_UNCOMPRESSED_SIZE
-from inference.web.workers.batch_queue import BatchTask, get_batch_queue
+from backend.data.repositories import BatchUploadRepository
+from backend.web.core.auth import validate_admin_token
+from backend.web.services.batch_upload import BatchUploadService, MAX_COMPRESSED_SIZE, MAX_UNCOMPRESSED_SIZE
+from backend.web.workers.batch_queue import BatchTask, get_batch_queue
logger = logging.getLogger(__name__)

View File

@@ -4,9 +4,9 @@ Public API v1
Customer-facing endpoints for inference, async processing, and labeling.
"""
-from inference.web.api.v1.public.inference import create_inference_router
-from inference.web.api.v1.public.async_api import create_async_router, set_async_service
-from inference.web.api.v1.public.labeling import create_labeling_router
+from backend.web.api.v1.public.inference import create_inference_router
+from backend.web.api.v1.public.async_api import create_async_router, set_async_service
+from backend.web.api.v1.public.labeling import create_labeling_router
__all__ = [
"create_inference_router",

View File

@@ -11,13 +11,13 @@ from uuid import UUID
from fastapi import APIRouter, File, HTTPException, Query, UploadFile
-from inference.web.dependencies import (
+from backend.web.dependencies import (
ApiKeyDep,
AsyncDBDep,
PollRateLimitDep,
SubmitRateLimitDep,
)
-from inference.web.schemas.inference import (
+from backend.web.schemas.inference import (
AsyncRequestItem,
AsyncRequestsListResponse,
AsyncResultResponse,
@@ -27,7 +27,7 @@ from inference.web.schemas.inference import (
DetectionResult,
InferenceResult,
)
-from inference.web.schemas.common import ErrorResponse
+from backend.web.schemas.common import ErrorResponse
def _validate_request_id(request_id: str) -> None:

View File

@@ -15,18 +15,18 @@ from typing import TYPE_CHECKING
from fastapi import APIRouter, File, HTTPException, UploadFile, status
from fastapi.responses import FileResponse
-from inference.web.schemas.inference import (
+from backend.web.schemas.inference import (
DetectionResult,
HealthResponse,
InferenceResponse,
InferenceResult,
)
-from inference.web.schemas.common import ErrorResponse
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.schemas.common import ErrorResponse
+from backend.web.services.storage_helpers import get_storage_helper
if TYPE_CHECKING:
-from inference.web.services import InferenceService
-from inference.web.config import StorageConfig
+from backend.web.services import InferenceService
+from backend.web.config import StorageConfig
logger = logging.getLogger(__name__)

View File

@@ -13,14 +13,14 @@ from typing import TYPE_CHECKING
from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile, status
-from inference.data.repositories import DocumentRepository
-from inference.web.schemas.labeling import PreLabelResponse
-from inference.web.schemas.common import ErrorResponse
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.data.repositories import DocumentRepository
+from backend.web.schemas.labeling import PreLabelResponse
+from backend.web.schemas.common import ErrorResponse
+from backend.web.services.storage_helpers import get_storage_helper
if TYPE_CHECKING:
-from inference.web.services import InferenceService
-from inference.web.config import StorageConfig
+from backend.web.services import InferenceService
+from backend.web.config import StorageConfig
logger = logging.getLogger(__name__)

View File

@@ -17,11 +17,11 @@ from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse
from .config import AppConfig, default_config
-from inference.web.services import InferenceService
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.services import InferenceService
+from backend.web.services.storage_helpers import get_storage_helper
# Public API imports
-from inference.web.api.v1.public import (
+from backend.web.api.v1.public import (
create_inference_router,
create_async_router,
set_async_service,
@@ -29,14 +29,14 @@ from inference.web.api.v1.public import (
)
# Async processing imports
-from inference.data.async_request_db import AsyncRequestDB
-from inference.web.workers.async_queue import AsyncTaskQueue
-from inference.web.services.async_processing import AsyncProcessingService
-from inference.web.dependencies import init_dependencies
-from inference.web.core.rate_limiter import RateLimiter
+from backend.data.async_request_db import AsyncRequestDB
+from backend.web.workers.async_queue import AsyncTaskQueue
+from backend.web.services.async_processing import AsyncProcessingService
+from backend.web.dependencies import init_dependencies
+from backend.web.core.rate_limiter import RateLimiter
# Admin API imports
-from inference.web.api.v1.admin import (
+from backend.web.api.v1.admin import (
create_annotation_router,
create_augmentation_router,
create_auth_router,
@@ -44,15 +44,15 @@ from inference.web.api.v1.admin import (
create_locks_router,
create_training_router,
)
-from inference.web.api.v1.admin.dashboard import create_dashboard_router
-from inference.web.core.scheduler import start_scheduler, stop_scheduler
-from inference.web.core.autolabel_scheduler import start_autolabel_scheduler, stop_autolabel_scheduler
+from backend.web.api.v1.admin.dashboard import create_dashboard_router
+from backend.web.core.scheduler import start_scheduler, stop_scheduler
+from backend.web.core.autolabel_scheduler import start_autolabel_scheduler, stop_autolabel_scheduler
# Batch upload imports
-from inference.web.api.v1.batch.routes import router as batch_upload_router
-from inference.web.workers.batch_queue import init_batch_queue, shutdown_batch_queue
-from inference.web.services.batch_upload import BatchUploadService
-from inference.data.repositories import ModelVersionRepository
+from backend.web.api.v1.batch.routes import router as batch_upload_router
+from backend.web.workers.batch_queue import init_batch_queue, shutdown_batch_queue
+from backend.web.services.batch_upload import BatchUploadService
+from backend.data.repositories import ModelVersionRepository
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
@@ -125,7 +125,7 @@ def create_app(config: AppConfig | None = None) -> FastAPI:
# Initialize admin database tables (admin_tokens, admin_documents, training_tasks, etc.)
try:
-from inference.data.database import create_db_and_tables
+from backend.data.database import create_db_and_tables
create_db_and_tables()
logger.info("Admin database tables ready")
except Exception as e:

View File

@@ -4,7 +4,7 @@ Core Components
Reusable core functionality: authentication, rate limiting, scheduling.
"""
-from inference.web.core.auth import (
+from backend.web.core.auth import (
validate_admin_token,
get_token_repository,
get_document_repository,
@@ -22,14 +22,14 @@ from inference.web.core.auth import (
ModelVersionRepoDep,
BatchUploadRepoDep,
)
-from inference.web.core.rate_limiter import RateLimiter
-from inference.web.core.scheduler import start_scheduler, stop_scheduler, get_training_scheduler
-from inference.web.core.autolabel_scheduler import (
+from backend.web.core.rate_limiter import RateLimiter
+from backend.web.core.scheduler import start_scheduler, stop_scheduler, get_training_scheduler
+from backend.web.core.autolabel_scheduler import (
start_autolabel_scheduler,
stop_autolabel_scheduler,
get_autolabel_scheduler,
)
-from inference.web.core.task_interface import TaskRunner, TaskStatus, TaskManager
+from backend.web.core.task_interface import TaskRunner, TaskStatus, TaskManager
__all__ = [
"validate_admin_token",

View File

@@ -9,7 +9,7 @@ from typing import Annotated
from fastapi import Depends, Header, HTTPException
-from inference.data.repositories import (
+from backend.data.repositories import (
TokenRepository,
DocumentRepository,
AnnotationRepository,

View File

@@ -8,13 +8,13 @@ import logging
import threading
from pathlib import Path
-from inference.data.repositories import DocumentRepository, AnnotationRepository
-from inference.web.core.task_interface import TaskRunner, TaskStatus
-from inference.web.services.db_autolabel import (
+from backend.data.repositories import DocumentRepository, AnnotationRepository
+from backend.web.core.task_interface import TaskRunner, TaskStatus
+from backend.web.services.db_autolabel import (
get_pending_autolabel_documents,
process_document_autolabel,
)
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.services.storage_helpers import get_storage_helper
logger = logging.getLogger(__name__)

View File

@@ -13,7 +13,7 @@ from threading import Lock
from typing import TYPE_CHECKING
if TYPE_CHECKING:
-from inference.data.async_request_db import AsyncRequestDB
+from backend.data.async_request_db import AsyncRequestDB
logger = logging.getLogger(__name__)

View File

@@ -10,15 +10,15 @@ from datetime import datetime
from pathlib import Path
from typing import Any
-from inference.data.repositories import (
+from backend.data.repositories import (
TrainingTaskRepository,
DatasetRepository,
ModelVersionRepository,
DocumentRepository,
AnnotationRepository,
)
-from inference.web.core.task_interface import TaskRunner, TaskStatus
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.core.task_interface import TaskRunner, TaskStatus
+from backend.web.services.storage_helpers import get_storage_helper
logger = logging.getLogger(__name__)
@@ -360,7 +360,7 @@ class TrainingScheduler(TaskRunner):
"""Export training data for a task."""
from pathlib import Path
from shared.fields import FIELD_CLASSES
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.services.storage_helpers import get_storage_helper
# Get storage helper for reading images
storage = get_storage_helper()

View File

@@ -9,8 +9,8 @@ from typing import Annotated
from fastapi import Depends, Header, HTTPException, Request
-from inference.data.async_request_db import AsyncRequestDB
-from inference.web.rate_limiter import RateLimiter
+from backend.data.async_request_db import AsyncRequestDB
+from backend.web.rate_limiter import RateLimiter
logger = logging.getLogger(__name__)

View File

@@ -13,7 +13,7 @@ from threading import Lock
from typing import TYPE_CHECKING
if TYPE_CHECKING:
-from inference.data.async_request_db import AsyncRequestDB
+from backend.data.async_request_db import AsyncRequestDB
logger = logging.getLogger(__name__)

View File

@@ -0,0 +1,11 @@
"""
API Schemas
Pydantic models for request/response validation.
"""
# Import everything from sub-modules for backward compatibility
from backend.web.schemas.common import * # noqa: F401, F403
from backend.web.schemas.admin import * # noqa: F401, F403
from backend.web.schemas.inference import * # noqa: F401, F403
from backend.web.schemas.labeling import * # noqa: F401, F403
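These star re-exports keep pre-split import paths working; for example, both of the following resolve to the same class (a usage sketch; `ErrorResponse` lives in `backend.web.schemas.common` per other hunks in this diff):
```python
# Flat (legacy) and nested (new) import paths point at the same object.
from backend.web.schemas import ErrorResponse as flat
from backend.web.schemas.common import ErrorResponse as nested

assert flat is nested
```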

View File

@@ -59,8 +59,8 @@ class RunningTrainingInfo(BaseModel):
progress: int = Field(0, description="Training progress percentage")
-class ActiveModelResponse(BaseModel):
-"""Response for active model endpoint."""
+class DashboardActiveModelResponse(BaseModel):
+"""Response for dashboard active model endpoint."""
model: ActiveModelInfo | None = Field(
None, description="Active model info, null if none"

View File

@@ -0,0 +1,18 @@
"""
Business Logic Services
Service layer for processing requests and orchestrating data operations.
"""
from backend.web.services.autolabel import AutoLabelService, get_auto_label_service
from backend.web.services.inference import InferenceService
from backend.web.services.async_processing import AsyncProcessingService
from backend.web.services.batch_upload import BatchUploadService
__all__ = [
"AutoLabelService",
"get_auto_label_service",
"InferenceService",
"AsyncProcessingService",
"BatchUploadService",
]

View File

@@ -15,14 +15,14 @@ from pathlib import Path
from threading import Event, Thread
from typing import TYPE_CHECKING
-from inference.data.async_request_db import AsyncRequestDB
-from inference.web.workers.async_queue import AsyncTask, AsyncTaskQueue
-from inference.web.core.rate_limiter import RateLimiter
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.data.async_request_db import AsyncRequestDB
+from backend.web.workers.async_queue import AsyncTask, AsyncTaskQueue
+from backend.web.core.rate_limiter import RateLimiter
+from backend.web.services.storage_helpers import get_storage_helper
if TYPE_CHECKING:
-from inference.web.config import AsyncConfig, StorageConfig
-from inference.web.services.inference import InferenceService
+from backend.web.config import AsyncConfig, StorageConfig
+from backend.web.services.inference import InferenceService
logger = logging.getLogger(__name__)

View File

@@ -11,8 +11,8 @@ import numpy as np
from fastapi import HTTPException
from PIL import Image
-from inference.data.repositories import DocumentRepository, DatasetRepository
-from inference.web.schemas.admin.augmentation import (
+from backend.data.repositories import DocumentRepository, DatasetRepository
+from backend.web.schemas.admin.augmentation import (
AugmentationBatchResponse,
AugmentationConfigSchema,
AugmentationPreviewResponse,
@@ -283,7 +283,7 @@ class AugmentationService:
images_dir = Path(document.images_dir)
else:
# Fallback to constructed path
-from inference.web.core.config import get_settings
+from backend.web.core.config import get_settings
settings = get_settings()
images_dir = Path(settings.admin_storage_path) / "documents" / document_id / "images"

View File

@@ -12,7 +12,7 @@ import numpy as np
from PIL import Image
from shared.config import DEFAULT_DPI
-from inference.data.repositories import DocumentRepository, AnnotationRepository
+from backend.data.repositories import DocumentRepository, AnnotationRepository
from shared.fields import FIELD_CLASS_IDS, FIELD_CLASSES
from shared.matcher.field_matcher import FieldMatcher
from shared.ocr.paddle_ocr import OCREngine, OCRToken

View File

@@ -15,7 +15,7 @@ from uuid import UUID
from pydantic import BaseModel, Field, field_validator
-from inference.data.repositories import BatchUploadRepository
+from backend.data.repositories import BatchUploadRepository
from shared.fields import CSV_TO_CLASS_MAPPING
logger = logging.getLogger(__name__)

View File

@@ -12,8 +12,8 @@ from uuid import UUID
from sqlalchemy import func, exists, and_, or_
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import (
+from backend.data.database import get_session_context
+from backend.data.admin_models import (
AdminDocument,
AdminAnnotation,
AnnotationHistory,

View File

@@ -13,10 +13,10 @@ from typing import Any
from shared.config import DEFAULT_DPI
from shared.fields import CSV_TO_CLASS_MAPPING
-from inference.data.admin_models import AdminDocument
-from inference.data.repositories import DocumentRepository, AnnotationRepository
+from backend.data.admin_models import AdminDocument
+from backend.data.repositories import DocumentRepository, AnnotationRepository
from shared.data.db import DocumentDB
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.services.storage_helpers import get_storage_helper
logger = logging.getLogger(__name__)
@@ -80,8 +80,8 @@ def get_pending_autolabel_documents(
List of AdminDocument records with status='auto_labeling' and auto_label_status='pending'
"""
from sqlmodel import select
-from inference.data.database import get_session_context
-from inference.data.admin_models import AdminDocument
+from backend.data.database import get_session_context
+from backend.data.admin_models import AdminDocument
with get_session_context() as session:
statement = select(AdminDocument).where(
@@ -260,7 +260,7 @@ def _save_annotations_to_db(
Number of annotations saved
"""
from shared.fields import FIELD_CLASS_IDS
-from inference.web.services.storage_helpers import get_storage_helper
+from backend.web.services.storage_helpers import get_storage_helper
# Mapping from CSV field names to internal field names
CSV_TO_INTERNAL_FIELD: dict[str, str] = {

Some files were not shown because too many files have changed in this diff.