project-lyra/cortex/autonomy/executive/planner.py
"""
Executive planner - generates execution plans for complex requests.
Activated when inner monologue sets consult_executive=true.
"""
import os
import logging
from typing import Dict, Any, Optional

from llm.llm_router import call_llm

EXECUTIVE_LLM = os.getenv("EXECUTIVE_LLM", "CLOUD").upper()
VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true"

logger = logging.getLogger(__name__)
if VERBOSE_DEBUG:
    logger.setLevel(logging.DEBUG)

EXECUTIVE_SYSTEM_PROMPT = """
You are Lyra's executive planning system.
You create structured execution plans for complex tasks.
You do NOT generate the final response - only the plan.
Your plan should include:
1. Task decomposition (break into steps)
2. Required tools/resources
3. Reasoning strategy
4. Success criteria
Return a concise plan in natural language.
"""

async def plan_execution(
    user_prompt: str,
    intent: str,
    context_state: Dict[str, Any],
    identity_block: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Generate an execution plan for a complex request.

    Args:
        user_prompt: User's message
        intent: Detected intent from inner monologue
        context_state: Full session context (message counts, timing, active project, tools)
        identity_block: Lyra's identity (not yet used by the planner)

    Returns:
        Plan dictionary with structure:
        {
            "summary": "One-line plan summary",
            "plan_text": "Detailed plan",
            "steps": ["step1", "step2", ...],
            "tools_needed": ["RAG", "WEB", ...],
            "estimated_complexity": "low | medium | high"
        }
    """
    # Build planning prompt
    tools_available = context_state.get("tools_available", [])
    prompt = f"""{EXECUTIVE_SYSTEM_PROMPT}
User request: {user_prompt}
Detected intent: {intent}
Available tools: {", ".join(tools_available) if tools_available else "None"}
Session context:
- Message count: {context_state.get('message_count', 0)}
- Time since last message: {context_state.get('minutes_since_last_msg', 0):.1f} minutes
- Active project: {context_state.get('active_project', 'None')}
Generate a structured execution plan.
"""
    if VERBOSE_DEBUG:
        logger.debug(f"[EXECUTIVE] Planning prompt:\n{prompt}")

    # Call executive LLM
    plan_text = await call_llm(
        prompt,
        backend=EXECUTIVE_LLM,
        temperature=0.3,  # Lower temperature for planning
        max_tokens=500
    )
    if VERBOSE_DEBUG:
        logger.debug(f"[EXECUTIVE] Generated plan:\n{plan_text}")
    # Parse plan (simple heuristic extraction for Phase 1)
    steps = []
    tools_needed = []
    for line in plan_text.split('\n'):
        stripped = line.strip()
        line_lower = stripped.lower()
        # Lines that look numbered or bulleted, or that mention "step", count as plan steps
        if any(marker in line_lower for marker in ['step', '1.', '2.', '3.']) or stripped.startswith('-'):
            steps.append(stripped)
        # Record any available tool the plan mentions by name
        if tools_available:
            for tool in tools_available:
                if tool.lower() in line_lower and tool not in tools_needed:
                    tools_needed.append(tool)

    # Estimate complexity (simple heuristic)
    complexity = "low"
    if len(steps) > 3 or len(tools_needed) > 1:
        complexity = "medium"
    if len(steps) > 5 or "research" in intent.lower() or "analyze" in intent.lower():
        complexity = "high"
    # Use the first non-empty line of the plan as the one-line summary
    first_line = plan_text.strip().split('\n')[0] if plan_text else ""
    return {
        "summary": first_line[:100] or "Complex task execution plan",
        "plan_text": plan_text,
        "steps": steps[:10],  # Limit to 10 steps
        "tools_needed": tools_needed,
        "estimated_complexity": complexity
    }
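

# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of how plan_execution might be invoked, assuming a
# context_state shaped like the keys the planner reads above (tools_available,
# message_count, minutes_since_last_msg, active_project). The identity_block
# value here is a hypothetical placeholder; the planner does not inspect it yet.
# Running this directly requires a working call_llm backend for EXECUTIVE_LLM.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        plan = await plan_execution(
            user_prompt="Research recent work on memory consolidation and summarize it",
            intent="research",
            context_state={
                "tools_available": ["RAG", "WEB"],
                "message_count": 12,
                "minutes_since_last_msg": 3.5,
                "active_project": "cortex",
            },
            identity_block={"name": "Lyra"},  # hypothetical shape
        )
        print(plan["summary"])
        print(plan["estimated_complexity"], plan["steps"])

    asyncio.run(_demo())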