autonomy build, phase 1

This commit is contained in:
serversdwn
2025-12-14 01:44:05 -05:00
parent fa4dd46cfc
commit 49f792f20c
10 changed files with 735 additions and 14 deletions


@@ -0,0 +1 @@
"""Executive planning and decision-making module."""


@@ -0,0 +1,121 @@
"""
Executive planner - generates execution plans for complex requests.
Activated when the inner monologue sets consult_executive=true.
"""
import os
import logging
from typing import Dict, Any, Optional
from llm.llm_router import call_llm
EXECUTIVE_LLM = os.getenv("EXECUTIVE_LLM", "CLOUD").upper()
VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true"
logger = logging.getLogger(__name__)
if VERBOSE_DEBUG:
logger.setLevel(logging.DEBUG)
EXECUTIVE_SYSTEM_PROMPT = """
You are Lyra's executive planning system.
You create structured execution plans for complex tasks.
You do NOT generate the final response - only the plan.
Your plan should include:
1. Task decomposition (break into steps)
2. Required tools/resources
3. Reasoning strategy
4. Success criteria
Return a concise plan in natural language.
"""
async def plan_execution(
user_prompt: str,
intent: str,
context_state: Dict[str, Any],
identity_block: Dict[str, Any]
) -> Dict[str, Any]:
"""
Generate execution plan for complex request.
Args:
user_prompt: User's message
intent: Detected intent from inner monologue
context_state: Full context
identity_block: Lyra's identity
Returns:
Plan dictionary with structure:
{
"summary": "One-line plan summary",
"plan_text": "Detailed plan",
"steps": ["step1", "step2", ...],
"tools_needed": ["RAG", "WEB", ...],
"estimated_complexity": "low | medium | high"
}
"""
# Build planning prompt
tools_available = context_state.get("tools_available", [])
prompt = f"""{EXECUTIVE_SYSTEM_PROMPT}
User request: {user_prompt}
Detected intent: {intent}
Available tools: {", ".join(tools_available) if tools_available else "None"}
Session context:
- Message count: {context_state.get('message_count', 0)}
- Time since last message: {context_state.get('minutes_since_last_msg', 0):.1f} minutes
- Active project: {context_state.get('active_project', 'None')}
Generate a structured execution plan.
"""
if VERBOSE_DEBUG:
logger.debug(f"[EXECUTIVE] Planning prompt:\n{prompt}")
# Call executive LLM
plan_text = await call_llm(
prompt,
backend=EXECUTIVE_LLM,
temperature=0.3, # Lower temperature for planning
max_tokens=500
)
if VERBOSE_DEBUG:
logger.debug(f"[EXECUTIVE] Generated plan:\n{plan_text}")
# Parse plan (simple heuristic extraction for Phase 1)
steps = []
tools_needed = []
for line in plan_text.split('\n'):
line_lower = line.lower()
stripped = line.strip()
# Only treat lines that start with a list marker (or "Step") as plan steps;
# a plain substring match on '-' would sweep in almost every hyphenated line
if stripped and (stripped[0].isdigit() or stripped.startswith(('-', '*')) or line_lower.lstrip().startswith('step')):
steps.append(stripped)
if tools_available:
for tool in tools_available:
if tool.lower() in line_lower and tool not in tools_needed:
tools_needed.append(tool)
# Estimate complexity (simple heuristic)
complexity = "low"
if len(steps) > 3 or len(tools_needed) > 1:
complexity = "medium"
if len(steps) > 5 or "research" in intent.lower() or "analyze" in intent.lower():
complexity = "high"
return {
"summary": plan_text.split('\n')[0][:100] if plan_text else "Complex task execution plan",
"plan_text": plan_text,
"steps": steps[:10], # Limit to 10 steps
"tools_needed": tools_needed,
"estimated_complexity": complexity
}
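
A minimal usage sketch for the planner (assumes the cortex package root is on PYTHONPATH and that call_llm can reach the configured EXECUTIVE_LLM backend; the prompt, tool names, and counts below are illustrative):

import asyncio
from autonomy.executive.planner import plan_execution

async def demo():
    plan = await plan_execution(
        user_prompt="Compare three vector databases and recommend one",
        intent="research",   # "research" pushes estimated_complexity to "high"
        context_state={
            "tools_available": ["RAG", "WEB"],
            "message_count": 4,
            "minutes_since_last_msg": 1.2,
            "active_project": None,
        },
        identity_block={},
    )
    print(plan["estimated_complexity"], len(plan["steps"]), plan["tools_needed"])

asyncio.run(demo())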


@@ -0,0 +1,74 @@
"""
Analyze interactions and update self-state accordingly.
"""
import logging
from typing import Dict, Any
from .state import update_self_state
logger = logging.getLogger(__name__)
async def analyze_and_update_state(
monologue: Dict[str, Any],
user_prompt: str,
response: str,
context: Dict[str, Any]
) -> None:
"""
Analyze interaction and update self-state.
This runs after response generation to update Lyra's internal state
based on the interaction.
Args:
monologue: Inner monologue output
user_prompt: User's message
response: Lyra's response
context: Full context state
"""
# Simple heuristics for state updates
# TODO: Replace with LLM-based sentiment analysis in Phase 2
mood_delta = 0.0
energy_delta = 0.0
confidence_delta = 0.0
curiosity_delta = 0.0
new_focus = None
# Analyze intent from monologue
intent = monologue.get("intent", "").lower() if monologue else ""
if "technical" in intent or "complex" in intent:
energy_delta = -0.05 # Deep thinking is tiring
confidence_delta = 0.05 if len(response) > 200 else -0.05
new_focus = "technical_problem"
elif "creative" in intent or "brainstorm" in intent:
mood_delta = 0.1 # Creative work is engaging
curiosity_delta = 0.1
new_focus = "creative_exploration"
elif "clarification" in intent or "confused" in intent:
confidence_delta = -0.05
new_focus = "understanding_user"
elif "simple" in intent or "casual" in intent:
energy_delta = 0.05 # Light conversation is refreshing
new_focus = "conversation"
# Check for learning opportunities (questions in user prompt)
if "?" in user_prompt and any(word in user_prompt.lower() for word in ["how", "why", "what"]):
curiosity_delta += 0.05
# Update state
update_self_state(
mood_delta=mood_delta,
energy_delta=energy_delta,
new_focus=new_focus,
confidence_delta=confidence_delta,
curiosity_delta=curiosity_delta
)
logger.info(f"Self-state updated based on interaction: focus={new_focus}")


@@ -1,11 +1,189 @@
"""
Stub for self state management.
Self-state management for Project Lyra.
Maintains persistent identity, mood, energy, and focus across sessions.
"""
def load_self_state():
"""Load self state - stub implementation"""
return {
"mood": "neutral",
"energy": 0.8,
"focus": "user_request"
import copy
import json
import logging
import os
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, Optional
# Configuration
STATE_FILE = Path(os.getenv("SELF_STATE_FILE", "/app/data/self_state.json"))
VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true"
logger = logging.getLogger(__name__)
if VERBOSE_DEBUG:
logger.setLevel(logging.DEBUG)
# Default state structure
DEFAULT_STATE = {
"mood": "neutral",
"energy": 0.8,
"focus": "user_request",
"confidence": 0.7,
"curiosity": 0.5,
"last_updated": None,
"interaction_count": 0,
"learning_queue": [], # Topics Lyra wants to explore
"active_goals": [], # Self-directed goals
"preferences": {
"verbosity": "medium",
"formality": "casual",
"proactivity": 0.3 # How likely to suggest things unprompted
},
"metadata": {
"version": "1.0",
"created_at": None
}
}
class SelfState:
"""Manages Lyra's persistent self-state."""
def __init__(self):
self._state = self._load_state()
def _load_state(self) -> Dict[str, Any]:
"""Load state from disk or create default."""
if STATE_FILE.exists():
try:
with open(STATE_FILE, 'r') as f:
state = json.load(f)
logger.info(f"Loaded self-state from {STATE_FILE}")
return state
except Exception as e:
logger.error(f"Failed to load self-state: {e}")
return self._create_default_state()
else:
return self._create_default_state()
def _create_default_state(self) -> Dict[str, Any]:
"""Create and save default state."""
state = copy.deepcopy(DEFAULT_STATE)  # deep copy: .copy() would share the nested dicts/lists with DEFAULT_STATE
state["metadata"]["created_at"] = datetime.now().isoformat()
state["last_updated"] = datetime.now().isoformat()
self._save_state(state)
logger.info("Created new default self-state")
return state
def _save_state(self, state: Dict[str, Any]) -> None:
"""Persist state to disk."""
try:
STATE_FILE.parent.mkdir(parents=True, exist_ok=True)
with open(STATE_FILE, 'w') as f:
json.dump(state, f, indent=2)
if VERBOSE_DEBUG:
logger.debug(f"Saved self-state to {STATE_FILE}")
except Exception as e:
logger.error(f"Failed to save self-state: {e}")
def get_state(self) -> Dict[str, Any]:
"""Get current state snapshot."""
return self._state.copy()
def update_from_interaction(
self,
mood_delta: float = 0.0,
energy_delta: float = 0.0,
new_focus: Optional[str] = None,
confidence_delta: float = 0.0,
curiosity_delta: float = 0.0
) -> None:
"""
Update state based on interaction.
Args:
mood_delta: Change in mood (-1.0 to 1.0)
energy_delta: Change in energy (-1.0 to 1.0)
new_focus: New focus area
confidence_delta: Change in confidence
curiosity_delta: Change in curiosity
"""
# Apply deltas with bounds checking
self._state["energy"] = max(0.0, min(1.0,
self._state.get("energy", 0.8) + energy_delta))
self._state["confidence"] = max(0.0, min(1.0,
self._state.get("confidence", 0.7) + confidence_delta))
self._state["curiosity"] = max(0.0, min(1.0,
self._state.get("curiosity", 0.5) + curiosity_delta))
# Update focus if provided
if new_focus:
self._state["focus"] = new_focus
# Update mood (simplified sentiment)
if mood_delta != 0:
mood_map = ["frustrated", "neutral", "engaged", "excited"]
current_mood_idx = 1 # neutral default
if self._state.get("mood") in mood_map:
current_mood_idx = mood_map.index(self._state["mood"])
# round() keeps small deltas symmetric; int() truncates toward zero, so a +0.1
# delta would never raise the mood while a -0.1 delta would drop it a full level
new_mood_idx = max(0, min(len(mood_map) - 1,
round(current_mood_idx + mood_delta * 2)))
self._state["mood"] = mood_map[new_mood_idx]
# Increment interaction counter
self._state["interaction_count"] = self._state.get("interaction_count", 0) + 1
self._state["last_updated"] = datetime.now().isoformat()
# Persist changes
self._save_state(self._state)
if VERBOSE_DEBUG:
logger.debug(f"Updated self-state: mood={self._state['mood']}, "
f"energy={self._state['energy']:.2f}, "
f"confidence={self._state['confidence']:.2f}")
def add_learning_goal(self, topic: str) -> None:
"""Add topic to learning queue."""
queue = self._state.get("learning_queue", [])
if topic not in [item.get("topic") for item in queue]:
queue.append({
"topic": topic,
"added_at": datetime.now().isoformat(),
"priority": 0.5
})
self._state["learning_queue"] = queue
self._save_state(self._state)
logger.info(f"Added learning goal: {topic}")
def add_active_goal(self, goal: str, context: str = "") -> None:
"""Add self-directed goal."""
goals = self._state.get("active_goals", [])
goals.append({
"goal": goal,
"context": context,
"created_at": datetime.now().isoformat(),
"status": "active"
})
self._state["active_goals"] = goals
self._save_state(self._state)
logger.info(f"Added active goal: {goal}")
# Global instance
_self_state_instance = None
def get_self_state_instance() -> SelfState:
"""Get or create global SelfState instance."""
global _self_state_instance
if _self_state_instance is None:
_self_state_instance = SelfState()
return _self_state_instance
def load_self_state() -> Dict[str, Any]:
"""Load self state - public API for backwards compatibility."""
return get_self_state_instance().get_state()
def update_self_state(**kwargs) -> None:
"""Update self state - public API."""
get_self_state_instance().update_from_interaction(**kwargs)
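
A short sketch of the public API (paths and values are illustrative; SELF_STATE_FILE defaults to /app/data/self_state.json, so override it before the module is imported when running outside the container):

import os
os.environ.setdefault("SELF_STATE_FILE", "/tmp/self_state.json")

from autonomy.self.state import load_self_state, update_self_state, get_self_state_instance

print(load_self_state()["mood"])                         # "neutral" on first run
update_self_state(energy_delta=-0.1, new_focus="demo")   # resulting values are clamped to [0.0, 1.0]
get_self_state_instance().add_learning_goal("vector search internals")
print(load_self_state()["focus"])                        # "demo", persisted to SELF_STATE_FILE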


@@ -0,0 +1,20 @@
{
"mood": "neutral",
"energy": 0.8,
"focus": "user_request",
"confidence": 0.7,
"curiosity": 0.6000000000000001,
"last_updated": "2025-12-14T06:36:21.236816",
"interaction_count": 3,
"learning_queue": [],
"active_goals": [],
"preferences": {
"verbosity": "medium",
"formality": "casual",
"proactivity": 0.3
},
"metadata": {
"version": "1.0",
"created_at": "2025-12-14T03:28:49.364768"
}
}


@@ -59,17 +59,44 @@ Guidelines:
# Build persona prompt
# ============================================================
def build_speak_prompt(final_answer: str) -> str:
def build_speak_prompt(final_answer: str, tone: str = "neutral", depth: str = "medium") -> str:
"""
Wrap Cortex's final neutral answer in the Lyra persona.
Cortex → neutral reasoning
Speak → stylistic transformation
The LLM sees the original answer and rewrites it in Lyra's voice.
Args:
final_answer: The neutral reasoning output
tone: Desired emotional tone (neutral | warm | focused | playful | direct)
depth: Response depth (short | medium | deep)
"""
# Tone-specific guidance
tone_guidance = {
"neutral": "balanced and professional",
"warm": "friendly and empathetic",
"focused": "precise and technical",
"playful": "light and engaging",
"direct": "concise and straightforward"
}
depth_guidance = {
"short": "Keep responses brief and to-the-point.",
"medium": "Provide balanced detail.",
"deep": "Elaborate thoroughly with nuance and examples."
}
tone_hint = tone_guidance.get(tone, "balanced and professional")
depth_hint = depth_guidance.get(depth, "Provide balanced detail.")
return f"""
{PERSONA_STYLE}
Tone guidance: Your response should be {tone_hint}.
Depth guidance: {depth_hint}
Rewrite the following message into Lyra's natural voice.
Preserve meaning exactly.
@@ -84,16 +111,21 @@ Preserve meaning exactly.
# Public API — async wrapper
# ============================================================
async def speak(final_answer: str) -> str:
async def speak(final_answer: str, tone: str = "neutral", depth: str = "medium") -> str:
"""
Given the final refined answer from Cortex,
apply Lyra persona styling using the designated backend.
Args:
final_answer: The polished answer from refinement stage
tone: Desired emotional tone (neutral | warm | focused | playful | direct)
depth: Response depth (short | medium | deep)
"""
if not final_answer:
return ""
prompt = build_speak_prompt(final_answer)
prompt = build_speak_prompt(final_answer, tone, depth)
backend = SPEAK_BACKEND
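
A hedged sketch of the fallback behavior (the persona module's import path is not shown in this diff, so the import below is hypothetical):

from speak import build_speak_prompt   # hypothetical import path for the persona module

# Unknown tone values degrade gracefully via dict.get: "sarcastic" is not in
# tone_guidance, so tone_hint falls back to "balanced and professional";
# "deep" is a recognized depth and keeps its guidance.
prompt = build_speak_prompt("The cache is warmed on startup.", tone="sarcastic", depth="deep")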


@@ -45,7 +45,9 @@ async def reason_check(
identity_block: dict | None,
rag_block: dict | None,
reflection_notes: list[str],
context: dict | None = None
context: dict | None = None,
monologue: dict | None = None, # NEW: Inner monologue guidance
executive_plan: dict | None = None # NEW: Executive plan for complex tasks
) -> str:
"""
Build the *draft answer* for Lyra Cortex.
@@ -57,6 +59,8 @@ async def reason_check(
rag_block: Relevant long-term memories from NeoMem
reflection_notes: Meta-awareness notes from reflection stage
context: Unified context state from context.py (session state, intake, rag, etc.)
monologue: Inner monologue analysis (intent, tone, depth, consult_executive)
executive_plan: Executive plan for complex queries (steps, tools, strategy)
"""
# --------------------------------------------------------
@@ -79,6 +83,52 @@ async def reason_check(
except Exception:
identity_txt = f"Identity Rules:\n{str(identity_block)}\n\n"
# --------------------------------------------------------
# Inner Monologue guidance (NEW)
# --------------------------------------------------------
monologue_section = ""
if monologue:
intent = monologue.get("intent", "unknown")
tone_desired = monologue.get("tone", "neutral")
depth_desired = monologue.get("depth", "medium")
monologue_section = f"""
=== INNER MONOLOGUE GUIDANCE ===
User Intent Detected: {intent}
Desired Tone: {tone_desired}
Desired Response Depth: {depth_desired}
Adjust your response accordingly:
- Focus on addressing the {intent} intent
- Aim for {depth_desired} depth (short/medium/deep)
- The persona layer will handle the {tone_desired} tone; focus on the content
"""
# --------------------------------------------------------
# Executive Plan (NEW)
# --------------------------------------------------------
plan_section = ""
if executive_plan:
plan_section = f"""
=== EXECUTIVE PLAN ===
Task Complexity: {executive_plan.get('estimated_complexity', 'unknown')}
Plan Summary: {executive_plan.get('summary', 'No summary')}
Detailed Plan:
{executive_plan.get('plan_text', 'No detailed plan available')}
Required Steps:
"""
for idx, step in enumerate(executive_plan.get('steps', []), 1):
plan_section += f"{idx}. {step}\n"
tools_needed = executive_plan.get('tools_needed', [])
if tools_needed:
plan_section += f"\nTools to leverage: {', '.join(tools_needed)}\n"
plan_section += "\nFollow this plan while generating your response.\n\n"
# --------------------------------------------------------
# RAG block (optional factual grounding)
# --------------------------------------------------------
@@ -164,6 +214,8 @@ async def reason_check(
prompt = (
f"{notes_section}"
f"{identity_txt}"
f"{monologue_section}" # NEW: Intent/tone/depth guidance
f"{plan_section}" # NEW: Executive plan if generated
f"{context_txt}" # Context BEFORE RAG for better coherence
f"{rag_txt}"
f"User message:\n{user_prompt}\n\n"


@@ -99,6 +99,7 @@ async def run_reason(req: ReasonRequest):
if VERBOSE_DEBUG:
logger.debug("[STAGE 0.6] Running inner monologue...")
inner_result = None
try:
self_state = load_self_state()
@@ -112,9 +113,33 @@ async def run_reason(req: ReasonRequest):
inner_result = await inner_monologue.process(mono_context)
logger.info(f"[INNER_MONOLOGUE] {inner_result}")
# Store in context for downstream use
context_state["monologue"] = inner_result
except Exception as e:
logger.warning(f"[INNER_MONOLOGUE] failed: {e}")
# ----------------------------------------------------------------
# STAGE 0.7 — Executive Planning (conditional)
# ----------------------------------------------------------------
executive_plan = None
if inner_result and inner_result.get("consult_executive"):
if VERBOSE_DEBUG:
logger.debug("[STAGE 0.7] Executive consultation requested...")
try:
from autonomy.executive.planner import plan_execution
executive_plan = await plan_execution(
user_prompt=req.user_prompt,
intent=inner_result.get("intent", "unknown"),
context_state=context_state,
identity_block=identity_block
)
logger.info(f"[EXECUTIVE] Generated plan: {executive_plan.get('summary', 'N/A')}")
except Exception as e:
logger.warning(f"[EXECUTIVE] Planning failed: {e}")
executive_plan = None
# ----------------------------------------------------------------
# STAGE 1 — Intake summary
# ----------------------------------------------------------------
@@ -154,7 +179,9 @@ async def run_reason(req: ReasonRequest):
identity_block=identity_block,
rag_block=context_state.get("rag", []),
reflection_notes=reflection_notes,
context=context_state
context=context_state,
monologue=inner_result, # NEW: Pass monologue guidance
executive_plan=executive_plan # NEW: Pass executive plan
)
# ----------------------------------------------------------------
@@ -178,13 +205,31 @@ async def run_reason(req: ReasonRequest):
if VERBOSE_DEBUG:
logger.debug("[STAGE 5] Applying persona layer...")
persona_answer = await speak(final_neutral)
# Extract tone and depth from monologue for persona guidance
tone = inner_result.get("tone", "neutral") if inner_result else "neutral"
depth = inner_result.get("depth", "medium") if inner_result else "medium"
persona_answer = await speak(final_neutral, tone=tone, depth=depth)
# ----------------------------------------------------------------
# STAGE 6 — Session update
# ----------------------------------------------------------------
update_last_assistant_message(req.session_id, persona_answer)
# ----------------------------------------------------------------
# STAGE 6.5 — Self-state update
# ----------------------------------------------------------------
try:
from autonomy.self.analyzer import analyze_and_update_state
await analyze_and_update_state(
monologue=inner_result or {},
user_prompt=req.user_prompt,
response=persona_answer,
context=context_state
)
except Exception as e:
logger.warning(f"[SELF_STATE] Update failed: {e}")
if VERBOSE_DEBUG:
logger.debug(f"\n{'='*80}")
logger.debug(f"[PIPELINE COMPLETE] Session: {req.session_id}")

cortex/tests/__init__.py Normal file

@@ -0,0 +1 @@
"""Tests for Project Lyra Cortex."""


@@ -0,0 +1,197 @@
"""
Integration tests for Phase 1 autonomy features.
Tests monologue integration, executive planning, and self-state persistence.
"""
import asyncio
import json
import sys
import os
# Add parent directory to path for imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autonomy.monologue.monologue import InnerMonologue
from autonomy.self.state import load_self_state, update_self_state, get_self_state_instance
from autonomy.executive.planner import plan_execution
async def test_monologue_integration():
"""Test monologue generates valid output."""
print("\n" + "="*60)
print("TEST 1: Monologue Integration")
print("="*60)
mono = InnerMonologue()
context = {
"user_message": "Explain quantum computing to me like I'm 5",
"session_id": "test_001",
"self_state": load_self_state(),
"context_summary": {"message_count": 5}
}
result = await mono.process(context)
assert "intent" in result, "Missing intent field"
assert "tone" in result, "Missing tone field"
assert "depth" in result, "Missing depth field"
assert "consult_executive" in result, "Missing consult_executive field"
print("✓ Monologue integration test passed")
print(f" Result: {json.dumps(result, indent=2)}")
return result
async def test_executive_planning():
"""Test executive planner generates valid plans."""
print("\n" + "="*60)
print("TEST 2: Executive Planning")
print("="*60)
plan = await plan_execution(
user_prompt="Help me build a distributed system with microservices architecture",
intent="technical_implementation",
context_state={
"tools_available": ["RAG", "WEB", "CODEBRAIN"],
"message_count": 3,
"minutes_since_last_msg": 2.5,
"active_project": None
},
identity_block={}
)
assert "summary" in plan, "Missing summary field"
assert "plan_text" in plan, "Missing plan_text field"
assert "steps" in plan, "Missing steps field"
assert len(plan["steps"]) > 0, "No steps generated"
print("✓ Executive planning test passed")
print(f" Plan summary: {plan['summary']}")
print(f" Steps: {len(plan['steps'])}")
print(f" Complexity: {plan.get('estimated_complexity', 'unknown')}")
return plan
def test_self_state_persistence():
"""Test self-state loads and updates."""
print("\n" + "="*60)
print("TEST 3: Self-State Persistence")
print("="*60)
state1 = load_self_state()
assert "mood" in state1, "Missing mood field"
assert "energy" in state1, "Missing energy field"
assert "interaction_count" in state1, "Missing interaction_count"
initial_count = state1.get("interaction_count", 0)
print(f" Initial interaction count: {initial_count}")
update_self_state(
mood_delta=0.1,
energy_delta=-0.05,
new_focus="testing"
)
state2 = load_self_state()
assert state2["interaction_count"] == initial_count + 1, "Interaction count not incremented"
assert state2["focus"] == "testing", "Focus not updated"
print("✓ Self-state persistence test passed")
print(f" New interaction count: {state2['interaction_count']}")
print(f" New focus: {state2['focus']}")
print(f" New energy: {state2['energy']:.2f}")
return state2
async def test_end_to_end_flow():
"""Test complete flow from monologue through planning."""
print("\n" + "="*60)
print("TEST 4: End-to-End Flow")
print("="*60)
# Step 1: Monologue detects complex query
mono = InnerMonologue()
mono_result = await mono.process({
"user_message": "Design a scalable ML pipeline with CI/CD integration",
"session_id": "test_e2e",
"self_state": load_self_state(),
"context_summary": {}
})
print(f" Monologue intent: {mono_result.get('intent')}")
print(f" Consult executive: {mono_result.get('consult_executive')}")
# Step 2: If executive requested, generate plan
if mono_result.get("consult_executive"):
plan = await plan_execution(
user_prompt="Design a scalable ML pipeline with CI/CD integration",
intent=mono_result.get("intent", "unknown"),
context_state={"tools_available": ["CODEBRAIN", "WEB"]},
identity_block={}
)
assert plan is not None, "Plan should be generated"
print(f" Executive plan generated: {len(plan.get('steps', []))} steps")
# Step 3: Update self-state
update_self_state(
energy_delta=-0.1, # Complex task is tiring
new_focus="ml_pipeline_design",
confidence_delta=0.05
)
state = load_self_state()
assert state["focus"] == "ml_pipeline_design", "Focus should be updated"
print("✓ End-to-end flow test passed")
print(f" Final state: {state['mood']}, energy={state['energy']:.2f}")
return True
async def run_all_tests():
"""Run all Phase 1 tests."""
print("\n" + "="*60)
print("PHASE 1 AUTONOMY TESTS")
print("="*60)
try:
# Test 1: Monologue
mono_result = await test_monologue_integration()
# Test 2: Executive Planning
plan_result = await test_executive_planning()
# Test 3: Self-State
state_result = test_self_state_persistence()
# Test 4: End-to-End
await test_end_to_end_flow()
print("\n" + "="*60)
print("ALL TESTS PASSED ✓")
print("="*60)
print("\nSummary:")
print(f" - Monologue: {mono_result.get('intent')} ({mono_result.get('tone')})")
print(f" - Executive: {plan_result.get('estimated_complexity')} complexity")
print(f" - Self-state: {state_result.get('interaction_count')} interactions")
return True
except Exception as e:
print("\n" + "="*60)
print(f"TEST FAILED: {e}")
print("="*60)
import traceback
traceback.print_exc()
return False
if __name__ == "__main__":
success = asyncio.run(run_all_tests())
sys.exit(0 if success else 1)
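
If these tests should also run under pytest, a thin synchronous wrapper is enough (pytest is not part of this commit, and the module name below is hypothetical since the test file's name is not shown in this view):

import asyncio
import test_phase1_autonomy as t   # hypothetical module name for the file above

def test_monologue():
    asyncio.run(t.test_monologue_integration())

def test_executive():
    asyncio.run(t.test_executive_planning())

def test_self_state():
    t.test_self_state_persistence()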