"""LLM provider factory with prompt caching support."""

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from langchain_core.language_models import BaseChatModel

    from app.config import Settings


def create_llm(settings: Settings) -> BaseChatModel:
    """Build the chat model for the provider named in *settings*.

    Provider SDKs are imported lazily inside per-provider builders, so
    only the package for the selected provider needs to be installed.

    Args:
        settings: Application settings supplying ``llm_provider``,
            ``llm_model``, and the provider-specific API key.

    Returns:
        A configured chat model instance for the selected provider.

    Raises:
        ValueError: If ``settings.llm_provider`` is not one of
            ``anthropic``, ``openai``, or ``google``.
    """
    name = settings.llm_provider
    model_id = settings.llm_model

    def _build_anthropic() -> BaseChatModel:
        # Imported here so the anthropic SDK is only required when selected.
        from langchain_anthropic import ChatAnthropic

        return ChatAnthropic(
            model=model_id,
            api_key=settings.anthropic_api_key,
        )

    def _build_openai() -> BaseChatModel:
        from langchain_openai import ChatOpenAI

        return ChatOpenAI(
            model=model_id,
            api_key=settings.openai_api_key,
        )

    def _build_google() -> BaseChatModel:
        from langchain_google_genai import ChatGoogleGenerativeAI

        return ChatGoogleGenerativeAI(
            model=model_id,
            google_api_key=settings.google_api_key,
        )

    builders = {
        "anthropic": _build_anthropic,
        "openai": _build_openai,
        "google": _build_google,
    }
    builder = builders.get(name)
    if builder is None:
        raise ValueError(f"Unknown LLM provider: '{name}'. Use 'anthropic', 'openai', or 'google'.")
    return builder()