- Intent classification with LLM structured output (single/multi/ambiguous) - Discount agent with apply_discount and generate_coupon tools - Interrupt manager with 30-min TTL auto-expiration and retry prompts - Webhook escalation module with exponential backoff retry (max 3) - Three vertical industry templates (e-commerce, SaaS, fintech) - Template loading in AgentRegistry - Enhanced supervisor prompt with dynamic agent descriptions - 153 tests passing, 90.18% coverage
119 lines
3.8 KiB
Python
"""Intent classification using LLM structured output."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import logging
|
|
from typing import TYPE_CHECKING, Protocol
|
|
|
|
from pydantic import BaseModel
|
|
|
|
if TYPE_CHECKING:
|
|
from langchain_core.language_models import BaseChatModel
|
|
|
|
from app.registry import AgentConfig
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
# System prompt for the classifier LLM. The `{agent_list}` placeholder is
# filled at call time with one bullet line per available agent
# (see _build_agent_list).
CLASSIFICATION_PROMPT = (
    "You are an intent classifier for a customer support system.\n"
    "Given a user message, determine which agent(s) should handle it.\n\n"
    "Available agents:\n{agent_list}\n\n"
    "Rules:\n"
    "- If the message clearly maps to one agent, return a single intent.\n"
    "- If the message contains multiple distinct requests, return multiple intents "
    "in execution order.\n"
    "- If the message is vague or doesn't match any agent, set is_ambiguous=True "
    "and provide a clarification_question.\n"
    "- Never route to the fallback agent unless truly ambiguous.\n"
    "- confidence should be between 0.0 and 1.0.\n"
)

# When every classified intent's confidence is strictly below this value, the
# result is downgraded to ambiguous and the user is asked to clarify.
AMBIGUITY_THRESHOLD = 0.5
|
|
|
|
|
|
class IntentTarget(BaseModel, frozen=True):
    """A single classified intent targeting a specific agent."""

    # Name of the agent that should handle this intent; expected to match an
    # AgentConfig.name from the registry — TODO confirm matching is enforced
    # by the caller, since this model does no validation of its own.
    agent_name: str
    # Model-reported confidence. The prompt asks for a value in [0.0, 1.0],
    # but no bounds are validated here; values below AMBIGUITY_THRESHOLD may
    # cause the overall result to be treated as ambiguous.
    confidence: float
    # Short model-provided justification for the routing choice.
    reasoning: str
|
|
|
|
|
|
class ClassificationResult(BaseModel, frozen=True):
    """Result of intent classification -- may contain multiple intents."""

    # Classified intents in execution order; may be empty (e.g. when
    # classification failed and an ambiguous fallback was returned).
    intents: tuple[IntentTarget, ...]
    # True when the message was too vague to route confidently.
    is_ambiguous: bool = False
    # Question to send back to the user when is_ambiguous is True; None
    # otherwise.
    clarification_question: str | None = None
|
|
|
|
|
|
class IntentClassifier(Protocol):
    """Protocol for intent classification implementations.

    Structural interface: any object with a matching async ``classify``
    method satisfies it (e.g. LLMIntentClassifier below).
    """

    async def classify(
        self,
        message: str,
        available_agents: tuple[AgentConfig, ...],
    ) -> ClassificationResult:
        """Classify *message* against *available_agents*."""
        ...
|
|
|
|
|
|
def _build_agent_list(agents: tuple[AgentConfig, ...]) -> str:
|
|
"""Format agent descriptions for the classification prompt."""
|
|
lines = []
|
|
for agent in agents:
|
|
lines.append(f"- {agent.name}: {agent.description} (permission: {agent.permission})")
|
|
return "\n".join(lines)
|
|
|
|
|
|
class LLMIntentClassifier:
    """Classifies user intent using LLM structured output.

    Implements the ``IntentClassifier`` protocol by prompting the supplied
    chat model with the available agents and parsing the response into a
    ``ClassificationResult`` via structured output. Degrades gracefully to an
    ambiguous result whenever the model call fails or is unusable.
    """

    def __init__(self, llm: BaseChatModel) -> None:
        """Store the chat model used for classification calls."""
        self._llm = llm

    @staticmethod
    def _ambiguous_fallback(
        intents: tuple[IntentTarget, ...] = (),
        clarification: str | None = None,
    ) -> ClassificationResult:
        """Build an ambiguous result, defaulting the clarification question.

        Previously this construction was duplicated (with the same literal
        message) at three call sites in ``classify``.
        """
        return ClassificationResult(
            intents=intents,
            is_ambiguous=True,
            clarification_question=(
                clarification
                or "I'm not sure I understood. Could you please rephrase?"
            ),
        )

    async def classify(
        self,
        message: str,
        available_agents: tuple[AgentConfig, ...],
    ) -> ClassificationResult:
        """Classify user message into one or more agent intents.

        Args:
            message: Raw user message to route.
            available_agents: Agents the supervisor can dispatch to; their
                names/descriptions are embedded into the system prompt.

        Returns:
            The model's classification, or an ambiguous fallback when the
            LLM call raises, returns an unexpected type, or every returned
            intent's confidence is below ``AMBIGUITY_THRESHOLD``.
        """
        agent_list = _build_agent_list(available_agents)
        system_prompt = CLASSIFICATION_PROMPT.format(agent_list=agent_list)

        structured_llm = self._llm.with_structured_output(ClassificationResult)

        try:
            result = await structured_llm.ainvoke(
                [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": message},
                ]
            )
        except Exception:
            # Boundary: any provider/network/parsing error is logged and
            # converted into a user-facing clarification request.
            logger.exception("Intent classification failed, returning ambiguous")
            return self._ambiguous_fallback()

        if not isinstance(result, ClassificationResult):
            # Structured output may yield a dict/None depending on provider;
            # treat anything but the expected model as a failed classification.
            return self._ambiguous_fallback()

        # Apply ambiguity threshold: if no intent is confident enough, ask the
        # user to clarify instead of routing on a low-confidence guess.
        if result.intents and all(i.confidence < AMBIGUITY_THRESHOLD for i in result.intents):
            return self._ambiguous_fallback(
                intents=result.intents,
                clarification=result.clarification_question,
            )

        return result
|