# File: project-lyra/cortex/router.py
# Last modified: 2025-12-14 14:43:08 -05:00
# 376 lines, 14 KiB, Python
# router.py
import os
import logging
from fastapi import APIRouter
from pydantic import BaseModel
from reasoning.reasoning import reason_check
from reasoning.reflection import reflect_notes
from reasoning.refine import refine_answer
from persona.speak import speak
from persona.identity import load_identity
from context import collect_context, update_last_assistant_message
from intake.intake import add_exchange_internal
from autonomy.monologue.monologue import InnerMonologue
from autonomy.self.state import load_self_state
# -------------------------------------------------------------------
# Setup
# -------------------------------------------------------------------
# Opt-in verbose tracing for the whole router; driven by the
# VERBOSE_DEBUG environment variable ("true"/"false", case-insensitive).
VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true"
logger = logging.getLogger(__name__)

if VERBOSE_DEBUG:
    logger.setLevel(logging.DEBUG)

    def _attach_handler(handler, datefmt):
        # Wire *handler* onto the module logger with the shared [ROUTER] format.
        handler.setFormatter(logging.Formatter(
            '%(asctime)s [ROUTER] %(levelname)s: %(message)s',
            datefmt=datefmt,
        ))
        logger.addHandler(handler)

    # Console output uses a short time-only stamp.
    _attach_handler(logging.StreamHandler(), '%H:%M:%S')

    # File output is best-effort: a read-only or missing /app/logs must not
    # prevent the router from importing.
    try:
        os.makedirs('/app/logs', exist_ok=True)
        _attach_handler(
            logging.FileHandler('/app/logs/cortex_verbose_debug.log', mode='a'),
            '%Y-%m-%d %H:%M:%S',
        )
        logger.debug("VERBOSE_DEBUG enabled for router.py")
    except Exception as e:
        logger.debug(f"File logging failed: {e}")

# Module-level singletons: the FastAPI router mounted by the app, and the
# shared inner-monologue engine reused across requests.
cortex_router = APIRouter()
inner_monologue = InnerMonologue()
# -------------------------------------------------------------------
# Models
# -------------------------------------------------------------------
class ReasonRequest(BaseModel):
    """Request body for POST /reason."""

    # Conversation/session identifier used to scope context and memory.
    session_id: str
    # Raw user message the pipeline reasons about.
    user_prompt: str
    # Optional sampling temperature override.
    # NOTE(review): not referenced inside run_reason in this file —
    # confirm it is consumed elsewhere before relying on it.
    temperature: float | None = None
# -------------------------------------------------------------------
# /reason endpoint
# -------------------------------------------------------------------
@cortex_router.post("/reason")
async def run_reason(req: ReasonRequest):
    """Run the full reasoning pipeline for one user message.

    Stage order (order matters; later stages read what earlier ones put
    into ``context_state``):
      0    collect unified context
      0.5  load identity block
      0.6  inner monologue (observer-only; best-effort)
      0.7  executive planning (only when the monologue asks for it)
      0.8  autonomous tool invocation (env-gated; best-effort)
      1    extract intake summary (L20) from context
      2    reflection notes (best-effort)
      3    reasoning draft
      4    refinement to a neutral answer
      5    persona styling (tone/depth taken from the monologue)
      6    session update
      6.5  self-state update + pattern learning (best-effort)
      7    proactive monitoring/suggestions (env-gated; best-effort)

    Returns a dict with the draft, neutral, and persona-styled answers,
    reflection notes, the session id, and a small context summary.
    """
    if VERBOSE_DEBUG:
        logger.debug(f"\n{'='*80}")
        logger.debug(f"[PIPELINE START] Session: {req.session_id}")
        logger.debug(f"[PIPELINE START] User prompt: {req.user_prompt[:200]}...")
        logger.debug(f"{'='*80}\n")
    # ----------------------------------------------------------------
    # STAGE 0 — Context
    # ----------------------------------------------------------------
    if VERBOSE_DEBUG:
        logger.debug("[STAGE 0] Collecting unified context...")
    context_state = await collect_context(req.session_id, req.user_prompt)
    if VERBOSE_DEBUG:
        logger.debug(f"[STAGE 0] Context collected - {len(context_state.get('rag', []))} RAG results")
    # ----------------------------------------------------------------
    # STAGE 0.5 — Identity
    # ----------------------------------------------------------------
    if VERBOSE_DEBUG:
        logger.debug("[STAGE 0.5] Loading identity block...")
    identity_block = load_identity(req.session_id)
    if VERBOSE_DEBUG:
        logger.debug(f"[STAGE 0.5] Identity loaded: {identity_block.get('name', 'Unknown')}")
    # ----------------------------------------------------------------
    # STAGE 0.6 — Inner Monologue (observer-only)
    # ----------------------------------------------------------------
    # Best-effort: a monologue failure leaves inner_result as None and the
    # pipeline continues with neutral defaults downstream.
    if VERBOSE_DEBUG:
        logger.debug("[STAGE 0.6] Running inner monologue...")
    inner_result = None
    try:
        self_state = load_self_state()
        mono_context = {
            "user_message": req.user_prompt,
            "session_id": req.session_id,
            "self_state": self_state,
            "context_summary": context_state,
        }
        inner_result = await inner_monologue.process(mono_context)
        logger.info(f"[INNER_MONOLOGUE] {inner_result}")
        # Store in context for downstream use
        context_state["monologue"] = inner_result
    except Exception as e:
        logger.warning(f"[INNER_MONOLOGUE] failed: {e}")
    # ----------------------------------------------------------------
    # STAGE 0.7 — Executive Planning (conditional)
    # ----------------------------------------------------------------
    # Only runs when the monologue explicitly sets "consult_executive".
    # The planner is imported lazily so a broken/missing module only
    # degrades this stage, not the whole router.
    executive_plan = None
    if inner_result and inner_result.get("consult_executive"):
        if VERBOSE_DEBUG:
            logger.debug("[STAGE 0.7] Executive consultation requested...")
        try:
            from autonomy.executive.planner import plan_execution
            executive_plan = await plan_execution(
                user_prompt=req.user_prompt,
                intent=inner_result.get("intent", "unknown"),
                context_state=context_state,
                identity_block=identity_block
            )
            logger.info(f"[EXECUTIVE] Generated plan: {executive_plan.get('summary', 'N/A')}")
        except Exception as e:
            logger.warning(f"[EXECUTIVE] Planning failed: {e}")
            executive_plan = None
    # ----------------------------------------------------------------
    # STAGE 0.8 — Autonomous Tool Invocation
    # ----------------------------------------------------------------
    # Env-gated (ENABLE_AUTONOMOUS_TOOLS, default on) and requires a
    # successful monologue. Tool results are injected into context_state
    # for the reasoning stage; failures are logged and ignored.
    tool_results = None
    autonomous_enabled = os.getenv("ENABLE_AUTONOMOUS_TOOLS", "true").lower() == "true"
    tool_confidence_threshold = float(os.getenv("AUTONOMOUS_TOOL_CONFIDENCE_THRESHOLD", "0.6"))
    if autonomous_enabled and inner_result:
        if VERBOSE_DEBUG:
            logger.debug("[STAGE 0.8] Analyzing autonomous tool needs...")
        try:
            from autonomy.tools.decision_engine import ToolDecisionEngine
            from autonomy.tools.orchestrator import ToolOrchestrator
            # Analyze which tools to invoke
            decision_engine = ToolDecisionEngine()
            tool_decision = await decision_engine.analyze_tool_needs(
                user_prompt=req.user_prompt,
                monologue=inner_result,
                context_state=context_state,
                available_tools=["RAG", "WEB", "WEATHER", "CODEBRAIN"]
            )
            # Execute tools if confidence threshold met
            # NOTE(review): direct key access on tool_decision — a missing
            # key raises here but is swallowed by the enclosing except.
            if tool_decision["should_invoke_tools"] and tool_decision["confidence"] >= tool_confidence_threshold:
                orchestrator = ToolOrchestrator(tool_timeout=30)
                tool_results = await orchestrator.execute_tools(
                    tools_to_invoke=tool_decision["tools_to_invoke"],
                    context_state=context_state
                )
                # Format results for context injection
                tool_context = orchestrator.format_results_for_context(tool_results)
                context_state["autonomous_tool_results"] = tool_context
                if VERBOSE_DEBUG:
                    summary = tool_results.get("execution_summary", {})
                    logger.debug(f"[STAGE 0.8] Tools executed: {summary.get('successful', [])} succeeded")
            else:
                if VERBOSE_DEBUG:
                    logger.debug(f"[STAGE 0.8] No tools invoked (confidence: {tool_decision.get('confidence', 0):.2f})")
        except Exception as e:
            logger.warning(f"[STAGE 0.8] Autonomous tool invocation failed: {e}")
            if VERBOSE_DEBUG:
                import traceback
                traceback.print_exc()
    # ----------------------------------------------------------------
    # STAGE 1 — Intake summary
    # ----------------------------------------------------------------
    # "L20" is presumably a rolling last-20-messages summary produced by
    # the intake service — TODO confirm against the intake module. It may
    # arrive as a dict (with a "summary" key) or a plain string.
    intake_summary = "(no context available)"
    if context_state.get("intake"):
        l20 = context_state["intake"].get("L20")
        if isinstance(l20, dict):
            intake_summary = l20.get("summary", intake_summary)
        elif isinstance(l20, str):
            intake_summary = l20
    if VERBOSE_DEBUG:
        logger.debug(f"[STAGE 1] Intake summary extracted (L20): {intake_summary[:150]}...")
    # ----------------------------------------------------------------
    # STAGE 2 — Reflection
    # ----------------------------------------------------------------
    # Best-effort: on failure the pipeline proceeds with no notes.
    if VERBOSE_DEBUG:
        logger.debug("[STAGE 2] Running reflection...")
    try:
        reflection = await reflect_notes(intake_summary, identity_block=identity_block)
        reflection_notes = reflection.get("notes", [])
    except Exception as e:
        reflection_notes = []
        if VERBOSE_DEBUG:
            logger.debug(f"[STAGE 2] Reflection failed: {e}")
    # ----------------------------------------------------------------
    # STAGE 3 — Reasoning (draft)
    # ----------------------------------------------------------------
    # Not wrapped in try/except: a reasoning failure fails the request.
    if VERBOSE_DEBUG:
        logger.debug("[STAGE 3] Running reasoning (draft)...")
    draft = await reason_check(
        req.user_prompt,
        identity_block=identity_block,
        rag_block=context_state.get("rag", []),
        reflection_notes=reflection_notes,
        context=context_state,
        monologue=inner_result,  # NEW: Pass monologue guidance
        executive_plan=executive_plan  # NEW: Pass executive plan
    )
    # ----------------------------------------------------------------
    # STAGE 4 — Refinement
    # ----------------------------------------------------------------
    if VERBOSE_DEBUG:
        logger.debug("[STAGE 4] Running refinement...")
    result = await refine_answer(
        draft_output=draft,
        reflection_notes=reflection_notes,
        identity_block=identity_block,
        rag_block=context_state.get("rag", []),
    )
    final_neutral = result["final_output"]
    # ----------------------------------------------------------------
    # STAGE 5 — Persona
    # ----------------------------------------------------------------
    if VERBOSE_DEBUG:
        logger.debug("[STAGE 5] Applying persona layer...")
    # Extract tone and depth from monologue for persona guidance
    tone = inner_result.get("tone", "neutral") if inner_result else "neutral"
    depth = inner_result.get("depth", "medium") if inner_result else "medium"
    persona_answer = await speak(final_neutral, tone=tone, depth=depth)
    # ----------------------------------------------------------------
    # STAGE 6 — Session update
    # ----------------------------------------------------------------
    # NOTE(review): the session stores persona_answer as of this point;
    # any proactive suggestion appended in Stage 7 is returned to the
    # caller but NOT persisted here — confirm this is intentional.
    update_last_assistant_message(req.session_id, persona_answer)
    # ----------------------------------------------------------------
    # STAGE 6.5 — Self-state update & Pattern Learning
    # ----------------------------------------------------------------
    # Both sub-steps are best-effort and independently guarded.
    try:
        from autonomy.self.analyzer import analyze_and_update_state
        await analyze_and_update_state(
            monologue=inner_result or {},
            user_prompt=req.user_prompt,
            response=persona_answer,
            context=context_state
        )
    except Exception as e:
        logger.warning(f"[SELF_STATE] Update failed: {e}")
    # Pattern learning
    try:
        from autonomy.learning.pattern_learner import get_pattern_learner
        learner = get_pattern_learner()
        await learner.learn_from_interaction(
            user_prompt=req.user_prompt,
            response=persona_answer,
            monologue=inner_result or {},
            context=context_state
        )
    except Exception as e:
        logger.warning(f"[PATTERN_LEARNER] Learning failed: {e}")
    # ----------------------------------------------------------------
    # STAGE 7 — Proactive Monitoring & Suggestions
    # ----------------------------------------------------------------
    # Env-gated (ENABLE_PROACTIVE_MONITORING, default on). A qualifying
    # suggestion is appended directly onto the persona answer text.
    proactive_enabled = os.getenv("ENABLE_PROACTIVE_MONITORING", "true").lower() == "true"
    proactive_min_priority = float(os.getenv("PROACTIVE_SUGGESTION_MIN_PRIORITY", "0.6"))
    if proactive_enabled:
        try:
            from autonomy.proactive.monitor import get_proactive_monitor
            from autonomy.self.state import load_self_state
            monitor = get_proactive_monitor(min_priority=proactive_min_priority)
            self_state = load_self_state()
            suggestion = await monitor.analyze_session(
                session_id=req.session_id,
                context_state=context_state,
                self_state=self_state
            )
            # Append suggestion to response if exists
            if suggestion:
                suggestion_text = monitor.format_suggestion(suggestion)
                persona_answer += suggestion_text
                if VERBOSE_DEBUG:
                    logger.debug(f"[STAGE 7] Proactive suggestion added: {suggestion['type']} (priority: {suggestion['priority']:.2f})")
        except Exception as e:
            logger.warning(f"[STAGE 7] Proactive monitoring failed: {e}")
    if VERBOSE_DEBUG:
        logger.debug(f"\n{'='*80}")
        logger.debug(f"[PIPELINE COMPLETE] Session: {req.session_id}")
        logger.debug(f"[PIPELINE COMPLETE] Final answer length: {len(persona_answer)} chars")
        logger.debug(f"{'='*80}\n")
    # ----------------------------------------------------------------
    # RETURN
    # ----------------------------------------------------------------
    return {
        "draft": draft,
        "neutral": final_neutral,
        "persona": persona_answer,
        "reflection": reflection_notes,
        "session_id": req.session_id,
        "context_summary": {
            "rag_results": len(context_state.get("rag", [])),
            "minutes_since_last": context_state.get("minutes_since_last_msg"),
            "message_count": context_state.get("message_count"),
            "mode": context_state.get("mode"),
        }
    }
# -------------------------------------------------------------------
# /ingest endpoint (internal)
# -------------------------------------------------------------------
class IngestPayload(BaseModel):
    """Request body for POST /ingest (internal exchange recording)."""

    # Conversation/session identifier the exchange belongs to.
    session_id: str
    # The user's message in the exchange.
    user_msg: str
    # The assistant's reply in the exchange.
    assistant_msg: str
@cortex_router.post("/ingest")
async def ingest(payload: IngestPayload):
    """Record a completed user/assistant exchange (internal endpoint).

    Performs two independent, best-effort side effects — updating the
    session's last assistant message and feeding the exchange to intake —
    then always acknowledges with an "ok" status.
    """
    # Step 1: refresh the session's last assistant message (best-effort).
    try:
        update_last_assistant_message(payload.session_id, payload.assistant_msg)
    except Exception as e:
        logger.warning(f"[INGEST] Session update failed: {e}")

    # Step 2: hand the exchange to intake (best-effort).
    exchange = {
        "session_id": payload.session_id,
        "user_msg": payload.user_msg,
        "assistant_msg": payload.assistant_msg,
    }
    try:
        add_exchange_internal(exchange)
    except Exception as e:
        logger.warning(f"[INGEST] Intake update failed: {e}")

    return {"status": "ok", "session_id": payload.session_id}