"""LLM provider factory with prompt caching support."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.language_models import BaseChatModel
from app.config import Settings


def create_llm(settings: Settings) -> BaseChatModel:
    """Create an LLM instance based on the configured provider.

    The provider name is read from ``settings.llm_provider`` and dispatched
    to a small builder function. Each builder imports its SDK lazily so that
    only the selected provider's langchain integration needs to be installed.

    Args:
        settings: Application settings carrying the provider name, model
            name, and per-provider credentials/endpoints.

    Returns:
        A configured ``BaseChatModel`` for the selected provider.

    Raises:
        ValueError: If ``settings.llm_provider`` is not one of
            'anthropic', 'openai', 'azure_openai', or 'google'.
    """

    def _build_anthropic() -> BaseChatModel:
        # Lazy import: keeps langchain_anthropic optional for other providers.
        from langchain_anthropic import ChatAnthropic

        return ChatAnthropic(
            model=settings.llm_model,
            api_key=settings.anthropic_api_key,
        )

    def _build_openai() -> BaseChatModel:
        from langchain_openai import ChatOpenAI

        return ChatOpenAI(
            model=settings.llm_model,
            api_key=settings.openai_api_key,
        )

    def _build_azure_openai() -> BaseChatModel:
        # Azure addresses a deployment + endpoint rather than a bare model name.
        from langchain_openai import AzureChatOpenAI

        return AzureChatOpenAI(
            azure_deployment=settings.azure_openai_deployment,
            azure_endpoint=settings.azure_openai_endpoint,
            api_key=settings.azure_openai_api_key,
            api_version=settings.azure_openai_api_version,
        )

    def _build_google() -> BaseChatModel:
        from langchain_google_genai import ChatGoogleGenerativeAI

        return ChatGoogleGenerativeAI(
            model=settings.llm_model,
            google_api_key=settings.google_api_key,
        )

    builders = {
        "anthropic": _build_anthropic,
        "openai": _build_openai,
        "azure_openai": _build_azure_openai,
        "google": _build_google,
    }

    builder = builders.get(settings.llm_provider)
    if builder is None:
        raise ValueError(
            f"Unknown LLM provider: '{settings.llm_provider}'. "
            "Use 'anthropic', 'openai', 'azure_openai', or 'google'."
        )
    return builder()
|