Proven (Updated October 5, 2025 - GREAT-4A validation complete)
When users interact with Piper Morgan through natural language, their intent must be classified so the request can be routed to the appropriate handler. This pattern transforms ambiguous natural language into actionable intent types.
```python
from enum import Enum


class IntentType(Enum):
    CREATE_ISSUE = "create_issue"        # User wants to create a new GitHub issue
    REVIEW_ISSUE = "review_issue"        # User wants to review/update an existing issue
    QUERY_KNOWLEDGE = "query_knowledge"  # User asking about project knowledge
    ANALYZE_DATA = "analyze_data"        # User requesting data analysis
    CLARIFY = "clarify"                  # User needs clarification/multi-turn
    CHAT = "chat"                        # General conversation


class IntentClassifier:
    """
    Map natural language to intent types using pattern matching
    initially, evolving to ML-based classification.
    """

    def classify(self, user_input: str) -> IntentType:
        """
        Phase 1: Keyword-based classification
        Phase 2: LLM-based classification
        Phase 3: Fine-tuned model
        """
        input_lower = user_input.lower()

        # Phase 1 implementation (current): keyword groups are checked in order,
        # and the first matching group wins.
        if any(word in input_lower for word in ['create', 'new', 'add', 'issue', 'ticket']):
            return IntentType.CREATE_ISSUE
        elif any(word in input_lower for word in ['review', 'update', 'check', 'status']):
            return IntentType.REVIEW_ISSUE
        elif any(word in input_lower for word in ['what', 'how', 'explain', 'tell me']):
            return IntentType.QUERY_KNOWLEDGE
        elif any(word in input_lower for word in ['analyze', 'trends', 'metrics', 'data']):
            return IntentType.ANALYZE_DATA
        elif '?' in user_input and len(user_input.split()) < 5:
            return IntentType.CLARIFY
        else:
            return IntentType.CHAT
```
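A quick illustration of the Phase 1 behaviour; the example strings below are hypothetical, not project test cases:

```python
classifier = IntentClassifier()

# First matching keyword group wins, so phrasing matters with this heuristic.
assert classifier.classify("Create a ticket for the login bug") is IntentType.CREATE_ISSUE
assert classifier.classify("Check the status of the onboarding work") is IntentType.REVIEW_ISSUE
assert classifier.classify("How does the roadmap handle Q3?") is IntentType.QUERY_KNOWLEDGE
assert classifier.classify("Analyze weekly retention metrics") is IntentType.ANALYZE_DATA
assert classifier.classify("Which one?") is IntentType.CLARIFY
assert classifier.classify("Thanks, that helps") is IntentType.CHAT
```

Because matching is plain substring containment, a request like "show me trends" hits the 'how' keyword (inside "show") before it ever reaches 'trends'; that brittleness is what the later LLM-based phases are meant to address.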
```python
# In services/queries/router.py
async def route_query(self, query: QueryModel) -> QueryResponse:
    # First classify intent
    intent = self.intent_classifier.classify(query.text)

    # Then route to the appropriate handler
    if intent == IntentType.CREATE_ISSUE:
        return await self.github_service.create_issue_workflow(query)
    elif intent == IntentType.QUERY_KNOWLEDGE:
        return await self.knowledge_service.query(query)
    # ... etc
```
The intent classification system now uses a two-stage approach: a fast regex pre-classification pass, with LLM classification as the fallback when no pattern matches:
```python
# services/intent_service/classifier.py
class IntentClassifier:
    async def classify(self, message: str) -> Intent:
        # Stage 1: Pre-classification (regex patterns)
        pre_intent = PreClassifier.pre_classify(message)
        if pre_intent:
            return pre_intent

        # Stage 2: LLM classification (fallback)
        return await self._classify_with_reasoning(message)
```
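`PreClassifier`, `Intent`, and `IntentCategory` are referenced above but not shown. A minimal sketch of what the stage-1 regex pass could look like, using illustrative field names, patterns, and category values taken from the handler code below (not the project's actual definitions):

```python
import re
from dataclasses import dataclass
from enum import Enum
from typing import Optional


class IntentCategory(Enum):
    TEMPORAL = "temporal"
    STATUS = "status"
    PRIORITY = "priority"
    UNKNOWN = "unknown"


@dataclass
class Intent:
    category: IntentCategory
    raw_text: str
    confidence: float = 1.0


class PreClassifier:
    # Illustrative patterns only; the real regexes live in services/intent_service/
    _PATTERNS = [
        (re.compile(r"\b(yesterday|today|last week|this sprint)\b", re.I), IntentCategory.TEMPORAL),
        (re.compile(r"\b(status|progress|blocked)\b", re.I), IntentCategory.STATUS),
        (re.compile(r"\b(priority|priorities|most important)\b", re.I), IntentCategory.PRIORITY),
    ]

    @classmethod
    def pre_classify(cls, message: str) -> Optional[Intent]:
        # Cheap, deterministic pass: return an Intent only on an unambiguous match,
        # otherwise return None so the caller falls through to LLM classification.
        for pattern, category in cls._PATTERNS:
            if pattern.search(message):
                return Intent(category=category, raw_text=message)
        return None
```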
```python
# services/intent_service/canonical_handlers.py
class CanonicalHandlers:
    async def handle(self, intent: Intent, session_id: str) -> Dict:
        if intent.category == IntentCategory.TEMPORAL:
            return await self._handle_temporal_query(intent, session_id)
        elif intent.category == IntentCategory.STATUS:
            return await self._handle_status_query(intent, session_id)
        elif intent.category == IntentCategory.PRIORITY:
            return await self._handle_priority_query(intent, session_id)
```
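Taken together, a minimal sketch of how the two-stage classifier and the canonical handlers could be wired; the function name and construction here are illustrative, not the project's actual composition:

```python
from typing import Dict


async def respond(message: str, session_id: str) -> Dict:
    # Illustrative wiring: two-stage classification feeds the category-based dispatch.
    classifier = IntentClassifier()
    handlers = CanonicalHandlers()

    intent = await classifier.classify(message)       # regex pre-pass, then LLM fallback
    return await handlers.handle(intent, session_id)  # dispatch on intent.category
```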
Implementation: services/intent_service/ (complete)
Baseline metrics: dev/2025/10/05/intent-baseline-metrics.md