"""Module-level LLM bootstrap.

Reads ``LLM_PROVIDER`` from the environment (via a local ``.env`` file when
present) and constructs the shared ``llm`` chat-model instance for either
Azure OpenAI or OpenAI. Any other provider value raises ``ValueError`` at
import time. A single ``TokenUsageCallbackHandler`` is attached so token
usage accumulates across all calls made through ``llm``.
"""
import os

from dotenv import load_dotenv
from langchain_openai import AzureChatOpenAI, ChatOpenAI

from .callbacks import TokenUsageCallbackHandler

# Populate os.environ from a .env file, if one exists alongside the project.
load_dotenv()

# Provider selection is case-insensitive and defaults to plain OpenAI.
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "openai").lower()

# Set by exactly one branch below; stays None only if the raise fires first.
llm = None

print(f"--- [Core] Initializing LLM with provider: {LLM_PROVIDER} ---")

# One shared handler instance: every invocation through `llm` reports into it.
token_callback = TokenUsageCallbackHandler()

if LLM_PROVIDER == "azure":
    # Azure needs endpoint + key + API version + deployment name, all from env.
    # NOTE(review): missing env vars pass None through to the client — confirm
    # the client surfaces a clear error in that case.
    llm = AzureChatOpenAI(
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
        api_key=os.getenv("AZURE_OPENAI_API_KEY"),
        api_version=os.getenv("OPENAI_API_VERSION"),
        azure_deployment=os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"),
        temperature=0,
        callbacks=[token_callback],
    )
elif LLM_PROVIDER == "openai":
    # Plain OpenAI: key from env, model overridable, defaults to gpt-4o.
    llm = ChatOpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
        model_name=os.getenv("OPENAI_MODEL_NAME", "gpt-4o"),
        temperature=0,
        callbacks=[token_callback],
    )
else:
    # Fail fast at import time rather than at first use.
    raise ValueError(f"Unsupported LLM_PROVIDER: {LLM_PROVIDER}. Please use 'azure' or 'openai'.")