autonomy phase 2
This commit is contained in:
1
cortex/autonomy/actions/__init__.py
Normal file
1
cortex/autonomy/actions/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Autonomous action execution system."""
|
||||
480
cortex/autonomy/actions/autonomous_actions.py
Normal file
480
cortex/autonomy/actions/autonomous_actions.py
Normal file
@@ -0,0 +1,480 @@
|
||||
"""
|
||||
Autonomous Action Manager - executes safe, self-initiated actions.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
from typing import Dict, List, Any, Optional
|
||||
from datetime import datetime
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AutonomousActionManager:
    """
    Manages safe autonomous actions that Lyra can take without explicit user prompting.

    Whitelist of allowed actions:
    - create_memory: Store information in NeoMem
    - update_goal: Modify goal status
    - schedule_reminder: Create future reminder
    - summarize_session: Generate conversation summary
    - learn_topic: Add topic to learning queue
    - update_focus: Change current focus area
    """

    # Single source of truth for each action's required parameter and the
    # exact error message reported by validate_action().  Keeping this in one
    # map prevents the whitelist and the validator from drifting apart.
    _REQUIRED_PARAMS = {
        "create_memory": ("text", "Memory 'text' parameter required"),
        "update_goal": ("goal_id", "Goal 'goal_id' parameter required"),
        "schedule_reminder": ("message", "Reminder 'message' parameter required"),
        "learn_topic": ("topic", "Learning 'topic' parameter required"),
        "update_focus": ("focus", "Focus 'focus' parameter required"),
    }

    def __init__(self):
        """Initialize action manager with whitelisted actions."""
        self.allowed_actions = {
            "create_memory": self._create_memory,
            "update_goal": self._update_goal,
            "schedule_reminder": self._schedule_reminder,
            "summarize_session": self._summarize_session,
            "learn_topic": self._learn_topic,
            "update_focus": self._update_focus
        }

        self.action_log = []  # Track all actions for audit

    @staticmethod
    def _preview(text: str, limit: int = 50) -> str:
        """Return `text` truncated to `limit` characters with a '...' suffix."""
        return text[:limit] + "..." if len(text) > limit else text

    async def execute_action(
        self,
        action_type: str,
        parameters: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute a single autonomous action.

        Args:
            action_type: Type of action (must be in whitelist)
            parameters: Action-specific parameters
            context: Current context state

        Returns:
            {
                "success": bool,
                "action": action_type,
                "result": action_result,
                "timestamp": ISO timestamp,
                "error": optional error message
            }

        Note: whitelist rejections are returned to the caller but NOT appended
        to the audit log; only attempts that reach an action handler are logged.
        """
        # Safety check: action must be whitelisted
        if action_type not in self.allowed_actions:
            logger.error(f"[ACTIONS] Attempted to execute non-whitelisted action: {action_type}")
            return {
                "success": False,
                "action": action_type,
                "error": f"Action '{action_type}' not in whitelist",
                "timestamp": datetime.utcnow().isoformat()
            }

        try:
            logger.info(f"[ACTIONS] Executing autonomous action: {action_type}")

            # Execute the action
            action_func = self.allowed_actions[action_type]
            result = await action_func(parameters, context)

            # Log successful action
            action_record = {
                "success": True,
                "action": action_type,
                "result": result,
                "timestamp": datetime.utcnow().isoformat(),
                "parameters": parameters
            }

            self.action_log.append(action_record)
            logger.info(f"[ACTIONS] Action {action_type} completed successfully")

            return action_record

        except Exception as e:
            logger.error(f"[ACTIONS] Action {action_type} failed: {e}")

            error_record = {
                "success": False,
                "action": action_type,
                "error": str(e),
                "timestamp": datetime.utcnow().isoformat(),
                "parameters": parameters
            }

            self.action_log.append(error_record)
            return error_record

    async def execute_batch(
        self,
        actions: List[Dict[str, Any]],
        context: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Execute multiple actions sequentially.

        Args:
            actions: List of {"action": str, "parameters": dict}; an entry may
                also carry "critical": True to abort the batch on its failure.
            context: Current context state

        Returns:
            List of action results (may be shorter than `actions` if a
            critical action failed part-way through).
        """
        results = []

        for action_spec in actions:
            action_type = action_spec.get("action")
            parameters = action_spec.get("parameters", {})

            result = await self.execute_action(action_type, parameters, context)
            results.append(result)

            # Stop on first failure if the spec marked this action critical
            if not result["success"] and action_spec.get("critical", False):
                logger.warning(f"[ACTIONS] Critical action {action_type} failed, stopping batch")
                break

        return results

    # ========================================
    # Whitelisted Action Implementations
    # ========================================

    async def _create_memory(
        self,
        parameters: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Create a memory entry in NeoMem.

        Parameters:
            - text: Memory content (required)
            - tags: Optional tags for memory
            - importance: 0.0-1.0 importance score

        Raises:
            ValueError: if no text was supplied.
        """
        text = parameters.get("text")
        if not text:
            raise ValueError("Memory text required")

        tags = parameters.get("tags", [])
        importance = parameters.get("importance", 0.5)
        session_id = context.get("session_id", "autonomous")

        # NeoMem is optional at runtime; degrade to a simulated (non-persisted)
        # result when the client package is not installed.
        try:
            from memory.neomem_client import store_memory

            result = await store_memory(
                text=text,
                session_id=session_id,
                tags=tags,
                importance=importance
            )

            return {
                "memory_id": result.get("id"),
                "text": self._preview(text)
            }

        except ImportError:
            logger.warning("[ACTIONS] NeoMem client not available, simulating memory storage")
            return {
                "memory_id": "simulated",
                "text": self._preview(text),
                "note": "NeoMem not available, memory not persisted"
            }

    async def _update_goal(
        self,
        parameters: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Update goal status in self-state.

        Parameters:
            - goal_id: Goal identifier (required)
            - status: New status (pending/in_progress/completed)
            - progress: Optional progress note

        Raises:
            ValueError: if no goal_id was supplied.
        """
        goal_id = parameters.get("goal_id")
        if not goal_id:
            raise ValueError("goal_id required")

        status = parameters.get("status", "in_progress")
        progress = parameters.get("progress")

        from autonomy.self.state import get_self_state_instance

        state = get_self_state_instance()
        active_goals = state._state.get("active_goals", [])

        # Find and update goal in place
        updated = False
        for goal in active_goals:
            if isinstance(goal, dict) and goal.get("id") == goal_id:
                goal["status"] = status
                if progress:
                    goal["progress"] = progress
                goal["updated_at"] = datetime.utcnow().isoformat()
                updated = True
                break

        if updated:
            # NOTE(review): pass the state dict for consistency with
            # _schedule_reminder/_update_focus, which call
            # _save_state(state._state) — previously this call passed no
            # argument; confirm against the _save_state signature.
            state._save_state(state._state)
            return {
                "goal_id": goal_id,
                "status": status,
                "updated": True
            }
        else:
            return {
                "goal_id": goal_id,
                "updated": False,
                "note": "Goal not found"
            }

    async def _schedule_reminder(
        self,
        parameters: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Schedule a future reminder.

        Parameters:
            - message: Reminder text (required)
            - delay_minutes: Minutes until reminder
            - priority: 0.0-1.0 priority score

        Raises:
            ValueError: if no message was supplied.
        """
        message = parameters.get("message")
        if not message:
            raise ValueError("Reminder message required")

        delay_minutes = parameters.get("delay_minutes", 60)
        priority = parameters.get("priority", 0.5)

        # For now, persist into self-state; a real scheduler/cron integration
        # is pending, so the reminder only records when it should trigger.
        from autonomy.self.state import get_self_state_instance

        state = get_self_state_instance()

        reminder = {
            "type": "reminder",
            "message": message,
            "scheduled_at": datetime.utcnow().isoformat(),
            "trigger_at_minutes": delay_minutes,
            "priority": priority
        }

        state._state.setdefault("reminders", []).append(reminder)
        state._save_state(state._state)  # Pass state dict as argument

        logger.info(f"[ACTIONS] Reminder scheduled: {message} (in {delay_minutes}min)")

        return {
            "message": message,
            "delay_minutes": delay_minutes,
            "note": "Reminder stored in self-state (scheduler integration pending)"
        }

    async def _summarize_session(
        self,
        parameters: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Generate a summary of current session.

        Parameters:
            - max_length: Max summary length in words
            - focus_topics: Optional list of topics to emphasize
        """
        max_length = parameters.get("max_length", 200)
        session_id = context.get("session_id", "unknown")

        # Prefer the full summarizer when available; otherwise fall back to a
        # one-line synthetic summary built from context counters.
        try:
            from utils.deferred_summary import summarize_conversation

            summary = await summarize_conversation(
                session_id=session_id,
                max_words=max_length
            )

            return {
                "summary": summary,
                "word_count": len(summary.split())
            }

        except ImportError:
            message_count = context.get("message_count", 0)
            focus = context.get("monologue", {}).get("intent", "general")

            summary = f"Session {session_id}: {message_count} messages exchanged, focused on {focus}."

            return {
                "summary": summary,
                "word_count": len(summary.split()),
                "note": "Simple summary (full summarizer not available)"
            }

    async def _learn_topic(
        self,
        parameters: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Add topic to learning queue.

        Parameters:
            - topic: Topic name (required)
            - reason: Why this topic
            - priority: 0.0-1.0 priority score

        Raises:
            ValueError: if no topic was supplied.
        """
        topic = parameters.get("topic")
        if not topic:
            raise ValueError("Topic required")

        reason = parameters.get("reason", "autonomous learning")
        priority = parameters.get("priority", 0.5)

        from autonomy.self.state import get_self_state_instance

        state = get_self_state_instance()
        state.add_learning_goal(topic)  # API accepts only the topic itself

        logger.info(f"[ACTIONS] Added to learning queue: {topic} (reason: {reason})")

        return {
            "topic": topic,
            "reason": reason,
            "queue_position": len(state._state.get("learning_queue", []))
        }

    async def _update_focus(
        self,
        parameters: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Update current focus area.

        Parameters:
            - focus: New focus area (required)
            - reason: Why this focus

        Raises:
            ValueError: if no focus was supplied.
        """
        focus = parameters.get("focus")
        if not focus:
            raise ValueError("Focus required")

        reason = parameters.get("reason", "autonomous update")

        from autonomy.self.state import get_self_state_instance

        state = get_self_state_instance()
        old_focus = state._state.get("focus", "none")

        state._state["focus"] = focus
        state._state["focus_updated_at"] = datetime.utcnow().isoformat()
        state._state["focus_reason"] = reason
        state._save_state(state._state)  # Pass state dict as argument

        logger.info(f"[ACTIONS] Focus updated: {old_focus} -> {focus}")

        return {
            "old_focus": old_focus,
            "new_focus": focus,
            "reason": reason
        }

    # ========================================
    # Utility Methods
    # ========================================

    def get_allowed_actions(self) -> List[str]:
        """Get list of all allowed action types."""
        return list(self.allowed_actions.keys())

    def get_action_log(self, limit: int = 50) -> List[Dict[str, Any]]:
        """
        Get recent action log.

        Args:
            limit: Max number of entries to return

        Returns:
            List of action records (most recent `limit` entries)
        """
        return self.action_log[-limit:]

    def clear_action_log(self) -> None:
        """Clear action log."""
        self.action_log = []
        logger.info("[ACTIONS] Action log cleared")

    def validate_action(self, action_type: str, parameters: Dict[str, Any]) -> Dict[str, Any]:
        """
        Validate an action without executing it.

        Args:
            action_type: Type of action
            parameters: Action parameters

        Returns:
            {
                "valid": bool,
                "action": action_type,
                "errors": [error messages] or []
            }
        """
        errors = []

        # Check whitelist
        if action_type not in self.allowed_actions:
            errors.append(f"Action '{action_type}' not in whitelist")

        # Check required parameters via the shared map (basic validation)
        required = self._REQUIRED_PARAMS.get(action_type)
        if required:
            param_name, message = required
            if not parameters.get(param_name):
                errors.append(message)

        return {
            "valid": len(errors) == 0,
            "action": action_type,
            "errors": errors
        }
|
||||
|
||||
|
||||
# Lazily-created process-wide singleton
_action_manager_instance = None


def get_action_manager() -> AutonomousActionManager:
    """
    Return the shared AutonomousActionManager, creating it on first use.

    Returns:
        AutonomousActionManager instance
    """
    global _action_manager_instance
    if _action_manager_instance is None:
        _action_manager_instance = AutonomousActionManager()
    return _action_manager_instance
|
||||
1
cortex/autonomy/learning/__init__.py
Normal file
1
cortex/autonomy/learning/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Pattern learning and adaptation system."""
|
||||
383
cortex/autonomy/learning/pattern_learner.py
Normal file
383
cortex/autonomy/learning/pattern_learner.py
Normal file
@@ -0,0 +1,383 @@
|
||||
"""
|
||||
Pattern Learning System - learns from interaction patterns to improve autonomy.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
from typing import Dict, List, Any, Optional
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PatternLearner:
    """
    Learns from interaction patterns to improve Lyra's autonomous behavior.

    Tracks:
    - Topic frequencies (what users talk about)
    - Time-of-day patterns (when users interact)
    - User preferences (how users like responses)
    - Successful response strategies (what works well)
    """

    def __init__(self, patterns_file: str = "/app/data/learned_patterns.json"):
        """
        Initialize pattern learner.

        Args:
            patterns_file: Path to persistent patterns storage
        """
        self.patterns_file = patterns_file
        self.patterns = self._load_patterns()

    @staticmethod
    def _default_patterns() -> Dict[str, Any]:
        """Return a fresh, empty patterns structure with all expected keys."""
        return {
            "topic_frequencies": {},
            "time_patterns": {},
            "user_preferences": {},
            "successful_strategies": {},
            "interaction_count": 0,
            "last_updated": datetime.utcnow().isoformat()
        }

    def _load_patterns(self) -> Dict[str, Any]:
        """Load patterns from disk, falling back to an empty structure."""
        defaults = self._default_patterns()

        if os.path.exists(self.patterns_file):
            try:
                with open(self.patterns_file, 'r') as f:
                    patterns = json.load(f)
                logger.info(f"[PATTERN_LEARNER] Loaded patterns from {self.patterns_file}")
                # Merge on top of defaults so a file written by an older
                # version (missing keys) can't cause KeyErrors later.
                defaults.update(patterns)
                return defaults
            except Exception as e:
                logger.error(f"[PATTERN_LEARNER] Failed to load patterns: {e}")

        return defaults

    def _save_patterns(self) -> None:
        """Save patterns to disk (best-effort; failures are logged)."""
        try:
            # Ensure directory exists
            os.makedirs(os.path.dirname(self.patterns_file), exist_ok=True)

            self.patterns["last_updated"] = datetime.utcnow().isoformat()

            with open(self.patterns_file, 'w') as f:
                json.dump(self.patterns, f, indent=2)

            logger.debug(f"[PATTERN_LEARNER] Saved patterns to {self.patterns_file}")

        except Exception as e:
            logger.error(f"[PATTERN_LEARNER] Failed to save patterns: {e}")

    async def learn_from_interaction(
        self,
        user_prompt: str,
        response: str,
        monologue: Dict[str, Any],
        context: Dict[str, Any]
    ) -> None:
        """
        Learn from a single interaction.

        Args:
            user_prompt: User's message
            response: Lyra's response
            monologue: Inner monologue analysis
            context: Full context state
        """
        self.patterns["interaction_count"] += 1

        self._learn_topics(user_prompt, monologue)
        self._learn_time_patterns()
        self._learn_preferences(monologue, context)
        self._learn_strategies(monologue, response, context)

        # Persist periodically (every 10 interactions) to bound disk writes
        if self.patterns["interaction_count"] % 10 == 0:
            self._save_patterns()

    def _learn_topics(self, user_prompt: str, monologue: Dict[str, Any]) -> None:
        """Track topic frequencies by intent and by long-word keywords."""
        intent = monologue.get("intent", "unknown")

        topic_freq = self.patterns["topic_frequencies"]
        topic_freq[intent] = topic_freq.get(intent, 0) + 1

        # Crude keyword extraction: any word longer than 5 characters
        keywords = [word.lower() for word in user_prompt.split() if len(word) > 5]

        for keyword in keywords:
            topic_freq[f"keyword:{keyword}"] = topic_freq.get(f"keyword:{keyword}", 0) + 1

        logger.debug(f"[PATTERN_LEARNER] Topic learned: {intent}")

    def _learn_time_patterns(self) -> None:
        """Track time-of-day and day-of-week interaction counts (UTC)."""
        now = datetime.utcnow()
        hour = now.hour

        time_patterns = self.patterns["time_patterns"]
        hour_key = f"hour_{hour:02d}"
        time_patterns[hour_key] = time_patterns.get(hour_key, 0) + 1

        day_key = f"day_{now.strftime('%A').lower()}"
        time_patterns[day_key] = time_patterns.get(day_key, 0) + 1

    def _learn_preferences(self, monologue: Dict[str, Any], context: Dict[str, Any]) -> None:
        """Learn user preferences from detected tone and depth."""
        tone = monologue.get("tone", "neutral")
        depth = monologue.get("depth", "medium")

        prefs = self.patterns["user_preferences"]

        prefs.setdefault("tone_counts", {})
        prefs["tone_counts"][tone] = prefs["tone_counts"].get(tone, 0) + 1

        prefs.setdefault("depth_counts", {})
        prefs["depth_counts"][depth] = prefs["depth_counts"].get(depth, 0) + 1

    def _learn_strategies(
        self,
        monologue: Dict[str, Any],
        response: str,
        context: Dict[str, Any]
    ) -> None:
        """
        Learn which response strategies are successful.

        Success indicators:
        - Executive was consulted and plan generated
        - Response length matches depth request
        - Tone matches request
        """
        intent = monologue.get("intent", "unknown")
        executive_used = context.get("executive_plan") is not None

        strategies = self.patterns["successful_strategies"]
        strategies.setdefault(intent, {})

        # Track executive usage for this intent
        if executive_used:
            key = f"{intent}:executive_used"
            strategies.setdefault(key, 0)
            strategies[key] += 1

        # Track response length per depth level
        response_length = len(response.split())
        depth = monologue.get("depth", "medium")

        length_key = f"{depth}:avg_words"
        if length_key not in strategies:
            strategies[length_key] = response_length
        else:
            # NOTE(review): this halves the old value each update, so it is an
            # exponentially-weighted smoothing biased to recent responses, not
            # a true running mean — kept as-is since consumers only need a
            # rough target length.
            strategies[length_key] = (strategies[length_key] + response_length) / 2

    # ========================================
    # Pattern Analysis and Recommendations
    # ========================================

    def get_top_topics(self, limit: int = 10) -> List[tuple]:
        """
        Get most frequent topics.

        Args:
            limit: Max number of topics to return

        Returns:
            List of (topic, count) tuples, sorted by count
        """
        topics = self.patterns["topic_frequencies"]
        sorted_topics = sorted(topics.items(), key=lambda x: x[1], reverse=True)
        return sorted_topics[:limit]

    def get_preferred_tone(self) -> str:
        """
        Get user's most preferred tone.

        Returns:
            Preferred tone string ("neutral" when nothing has been learned)
        """
        tone_counts = self.patterns["user_preferences"].get("tone_counts", {})

        if not tone_counts:
            return "neutral"

        return max(tone_counts.items(), key=lambda x: x[1])[0]

    def get_preferred_depth(self) -> str:
        """
        Get user's most preferred response depth.

        Returns:
            Preferred depth string ("medium" when nothing has been learned)
        """
        depth_counts = self.patterns["user_preferences"].get("depth_counts", {})

        if not depth_counts:
            return "medium"

        return max(depth_counts.items(), key=lambda x: x[1])[0]

    def get_peak_hours(self, limit: int = 3) -> List[int]:
        """
        Get peak interaction hours.

        Args:
            limit: Number of top hours to return

        Returns:
            List of hours (0-23), empty if no data yet
        """
        time_patterns = self.patterns["time_patterns"]
        hour_counts = {k: v for k, v in time_patterns.items() if k.startswith("hour_")}

        if not hour_counts:
            return []

        sorted_hours = sorted(hour_counts.items(), key=lambda x: x[1], reverse=True)
        top_hours = sorted_hours[:limit]

        # Keys look like "hour_NN"; extract the numeric part
        return [int(h[0].split("_")[1]) for h in top_hours]

    def should_use_executive(self, intent: str) -> bool:
        """
        Recommend whether to use executive for given intent based on patterns.

        Args:
            intent: Intent type

        Returns:
            True if executive is recommended (used >= 3 times for this intent)
        """
        strategies = self.patterns["successful_strategies"]
        key = f"{intent}:executive_used"

        return strategies.get(key, 0) >= 3

    def get_recommended_response_length(self, depth: str) -> int:
        """
        Get recommended response length in words for given depth.

        Args:
            depth: Depth level (short/medium/deep)

        Returns:
            Recommended word count (learned average, or a static default)
        """
        strategies = self.patterns["successful_strategies"]
        key = f"{depth}:avg_words"

        avg_length = strategies.get(key, None)

        # Explicit None check: a learned average of 0 is still a valid value
        if avg_length is not None:
            return int(avg_length)

        # Defaults if no pattern learned
        defaults = {
            "short": 50,
            "medium": 150,
            "deep": 300
        }

        return defaults.get(depth, 150)

    def get_insights(self) -> Dict[str, Any]:
        """
        Get high-level insights from learned patterns.

        Returns:
            {
                "total_interactions": int,
                "top_topics": [(topic, count), ...],
                "preferred_tone": str,
                "preferred_depth": str,
                "peak_hours": [hours],
                "learning_recommendations": [str]
            }
        """
        recommendations = []

        preferred_tone = self.get_preferred_tone()
        preferred_depth = self.get_preferred_depth()

        # Only surface preferences that deviate from the defaults
        if preferred_tone != "neutral":
            recommendations.append(f"User prefers {preferred_tone} tone")

        if preferred_depth != "medium":
            recommendations.append(f"User prefers {preferred_depth} depth responses")

        # Suggest queuing the single most recurring topic
        top_topics = self.get_top_topics(limit=3)
        if top_topics:
            top_topic = top_topics[0][0]
            recommendations.append(f"Consider adding '{top_topic}' to learning queue")

        return {
            "total_interactions": self.patterns["interaction_count"],
            "top_topics": self.get_top_topics(limit=5),
            "preferred_tone": preferred_tone,
            "preferred_depth": preferred_depth,
            "peak_hours": self.get_peak_hours(limit=3),
            "learning_recommendations": recommendations
        }

    def reset_patterns(self) -> None:
        """Reset all learned patterns (use with caution)."""
        self.patterns = self._default_patterns()
        self._save_patterns()
        logger.warning("[PATTERN_LEARNER] Patterns reset")

    def export_patterns(self) -> Dict[str, Any]:
        """
        Export all patterns for analysis.

        Returns:
            Complete patterns dict (shallow copy)
        """
        return self.patterns.copy()
|
||||
|
||||
|
||||
# Lazily-created process-wide singleton
_learner_instance = None


def get_pattern_learner(patterns_file: str = "/app/data/learned_patterns.json") -> PatternLearner:
    """
    Return the shared PatternLearner, creating it on first use.

    Args:
        patterns_file: Path to patterns file (honoured only on the first
            call; later calls return the already-created instance)

    Returns:
        PatternLearner instance
    """
    global _learner_instance
    if _learner_instance is None:
        _learner_instance = PatternLearner(patterns_file=patterns_file)
    return _learner_instance
|
||||
1
cortex/autonomy/proactive/__init__.py
Normal file
1
cortex/autonomy/proactive/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Proactive monitoring and suggestion system."""
|
||||
321
cortex/autonomy/proactive/monitor.py
Normal file
321
cortex/autonomy/proactive/monitor.py
Normal file
@@ -0,0 +1,321 @@
|
||||
"""
|
||||
Proactive Context Monitor - detects opportunities for autonomous suggestions.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from typing import Dict, List, Any, Optional
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ProactiveMonitor:
|
||||
"""
|
||||
Monitors conversation context and detects opportunities for proactive suggestions.
|
||||
|
||||
Triggers:
|
||||
- Long silence → Check-in
|
||||
- Learning queue + high curiosity → Suggest exploration
|
||||
- Active goals → Progress reminders
|
||||
- Conversation milestones → Offer summary
|
||||
- Pattern detection → Helpful suggestions
|
||||
"""
|
||||
|
||||
def __init__(self, min_priority: float = 0.6):
|
||||
"""
|
||||
Initialize proactive monitor.
|
||||
|
||||
Args:
|
||||
min_priority: Minimum priority for suggestions (0.0-1.0)
|
||||
"""
|
||||
self.min_priority = min_priority
|
||||
self.last_suggestion_time = {} # session_id -> timestamp
|
||||
self.cooldown_seconds = 300 # 5 minutes between proactive suggestions
|
||||
|
||||
async def analyze_session(
|
||||
self,
|
||||
session_id: str,
|
||||
context_state: Dict[str, Any],
|
||||
self_state: Dict[str, Any]
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Analyze session for proactive suggestion opportunities.
|
||||
|
||||
Args:
|
||||
session_id: Current session ID
|
||||
context_state: Full context including message history
|
||||
self_state: Lyra's current self-state
|
||||
|
||||
Returns:
|
||||
{
|
||||
"suggestion": "text to append to response",
|
||||
"priority": 0.0-1.0,
|
||||
"reason": "why this suggestion",
|
||||
"type": "check_in | learning | goal_reminder | summary | pattern"
|
||||
}
|
||||
or None if no suggestion
|
||||
"""
|
||||
# Check cooldown
|
||||
if not self._check_cooldown(session_id):
|
||||
logger.debug(f"[PROACTIVE] Session {session_id} in cooldown, skipping")
|
||||
return None
|
||||
|
||||
suggestions = []
|
||||
|
||||
# Check 1: Long silence detection
|
||||
silence_suggestion = self._check_long_silence(context_state)
|
||||
if silence_suggestion:
|
||||
suggestions.append(silence_suggestion)
|
||||
|
||||
# Check 2: Learning queue + high curiosity
|
||||
learning_suggestion = self._check_learning_opportunity(self_state)
|
||||
if learning_suggestion:
|
||||
suggestions.append(learning_suggestion)
|
||||
|
||||
# Check 3: Active goals reminder
|
||||
goal_suggestion = self._check_active_goals(self_state, context_state)
|
||||
if goal_suggestion:
|
||||
suggestions.append(goal_suggestion)
|
||||
|
||||
# Check 4: Conversation milestones
|
||||
milestone_suggestion = self._check_conversation_milestone(context_state)
|
||||
if milestone_suggestion:
|
||||
suggestions.append(milestone_suggestion)
|
||||
|
||||
# Check 5: Pattern-based suggestions
|
||||
pattern_suggestion = self._check_patterns(context_state, self_state)
|
||||
if pattern_suggestion:
|
||||
suggestions.append(pattern_suggestion)
|
||||
|
||||
# Filter by priority and return highest
|
||||
valid_suggestions = [s for s in suggestions if s["priority"] >= self.min_priority]
|
||||
|
||||
if not valid_suggestions:
|
||||
return None
|
||||
|
||||
# Return highest priority suggestion
|
||||
best_suggestion = max(valid_suggestions, key=lambda x: x["priority"])
|
||||
|
||||
# Update cooldown timer
|
||||
self._update_cooldown(session_id)
|
||||
|
||||
logger.info(f"[PROACTIVE] Suggestion generated: {best_suggestion['type']} (priority: {best_suggestion['priority']:.2f})")
|
||||
|
||||
return best_suggestion
|
||||
|
||||
def _check_cooldown(self, session_id: str) -> bool:
|
||||
"""Check if session is past cooldown period."""
|
||||
if session_id not in self.last_suggestion_time:
|
||||
return True
|
||||
|
||||
elapsed = time.time() - self.last_suggestion_time[session_id]
|
||||
return elapsed >= self.cooldown_seconds
|
||||
|
||||
def _update_cooldown(self, session_id: str) -> None:
|
||||
"""Update cooldown timer for session."""
|
||||
self.last_suggestion_time[session_id] = time.time()
|
||||
|
||||
def _check_long_silence(self, context_state: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Check if user has been silent for a long time.
|
||||
"""
|
||||
minutes_since_last = context_state.get("minutes_since_last_msg", 0)
|
||||
|
||||
# If > 30 minutes, suggest check-in
|
||||
if minutes_since_last > 30:
|
||||
return {
|
||||
"suggestion": "\n\n[Aside: I'm still here if you need anything!]",
|
||||
"priority": 0.7,
|
||||
"reason": f"User silent for {minutes_since_last:.0f} minutes",
|
||||
"type": "check_in"
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
def _check_learning_opportunity(self, self_state: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Check if Lyra has learning queue items and high curiosity.
|
||||
"""
|
||||
learning_queue = self_state.get("learning_queue", [])
|
||||
curiosity = self_state.get("curiosity", 0.5)
|
||||
|
||||
# If curiosity > 0.7 and learning queue exists
|
||||
if curiosity > 0.7 and learning_queue:
|
||||
topic = learning_queue[0] if learning_queue else "new topics"
|
||||
return {
|
||||
"suggestion": f"\n\n[Aside: I've been curious about {topic} lately. Would you like to explore it together?]",
|
||||
"priority": 0.65,
|
||||
"reason": f"High curiosity ({curiosity:.2f}) and learning queue present",
|
||||
"type": "learning"
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
def _check_active_goals(
|
||||
self,
|
||||
self_state: Dict[str, Any],
|
||||
context_state: Dict[str, Any]
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Check if there are active goals worth reminding about.
|
||||
"""
|
||||
active_goals = self_state.get("active_goals", [])
|
||||
|
||||
if not active_goals:
|
||||
return None
|
||||
|
||||
# Check if we've had multiple messages without goal progress
|
||||
message_count = context_state.get("message_count", 0)
|
||||
|
||||
# Every 10 messages, consider goal reminder
|
||||
if message_count % 10 == 0 and message_count > 0:
|
||||
goal = active_goals[0] # First active goal
|
||||
goal_name = goal if isinstance(goal, str) else goal.get("name", "your goal")
|
||||
|
||||
return {
|
||||
"suggestion": f"\n\n[Aside: Still thinking about {goal_name}. Let me know if you want to work on it.]",
|
||||
"priority": 0.6,
|
||||
"reason": f"Active goal present, {message_count} messages since start",
|
||||
"type": "goal_reminder"
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
def _check_conversation_milestone(self, context_state: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Check for conversation milestones (e.g., every 50 messages).
|
||||
"""
|
||||
message_count = context_state.get("message_count", 0)
|
||||
|
||||
# Every 50 messages, offer summary
|
||||
if message_count > 0 and message_count % 50 == 0:
|
||||
return {
|
||||
"suggestion": f"\n\n[Aside: We've exchanged {message_count} messages! Would you like a summary of our conversation?]",
|
||||
"priority": 0.65,
|
||||
"reason": f"Milestone: {message_count} messages",
|
||||
"type": "summary"
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
def _check_patterns(
|
||||
self,
|
||||
context_state: Dict[str, Any],
|
||||
self_state: Dict[str, Any]
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Check for behavioral patterns that merit suggestions.
|
||||
"""
|
||||
# Get current focus
|
||||
focus = self_state.get("focus", "")
|
||||
|
||||
# Check if user keeps asking similar questions (detected via focus)
|
||||
if focus and "repeated" in focus.lower():
|
||||
return {
|
||||
"suggestion": "\n\n[Aside: I notice we keep coming back to this topic. Would it help to create a summary or action plan?]",
|
||||
"priority": 0.7,
|
||||
"reason": "Repeated topic detected",
|
||||
"type": "pattern"
|
||||
}
|
||||
|
||||
# Check energy levels - if Lyra is low energy, maybe suggest break
|
||||
energy = self_state.get("energy", 0.8)
|
||||
if energy < 0.3:
|
||||
return {
|
||||
"suggestion": "\n\n[Aside: We've been at this for a while. Need a break or want to keep going?]",
|
||||
"priority": 0.65,
|
||||
"reason": f"Low energy ({energy:.2f})",
|
||||
"type": "pattern"
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
def format_suggestion(self, suggestion: Dict[str, Any]) -> str:
|
||||
"""
|
||||
Format suggestion for appending to response.
|
||||
|
||||
Args:
|
||||
suggestion: Suggestion dict from analyze_session()
|
||||
|
||||
Returns:
|
||||
Formatted string to append to response
|
||||
"""
|
||||
return suggestion.get("suggestion", "")
|
||||
|
||||
def set_cooldown_duration(self, seconds: int) -> None:
|
||||
"""
|
||||
Update cooldown duration.
|
||||
|
||||
Args:
|
||||
seconds: New cooldown duration
|
||||
"""
|
||||
self.cooldown_seconds = seconds
|
||||
logger.info(f"[PROACTIVE] Cooldown updated to {seconds}s")
|
||||
|
||||
def reset_cooldown(self, session_id: str) -> None:
|
||||
"""
|
||||
Reset cooldown for a specific session.
|
||||
|
||||
Args:
|
||||
session_id: Session to reset
|
||||
"""
|
||||
if session_id in self.last_suggestion_time:
|
||||
del self.last_suggestion_time[session_id]
|
||||
logger.info(f"[PROACTIVE] Cooldown reset for session {session_id}")
|
||||
|
||||
def get_session_stats(self, session_id: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Get stats for a session's proactive monitoring.
|
||||
|
||||
Args:
|
||||
session_id: Session to check
|
||||
|
||||
Returns:
|
||||
{
|
||||
"last_suggestion_time": timestamp or None,
|
||||
"seconds_since_last": int,
|
||||
"cooldown_active": bool,
|
||||
"cooldown_remaining": int
|
||||
}
|
||||
"""
|
||||
last_time = self.last_suggestion_time.get(session_id)
|
||||
|
||||
if not last_time:
|
||||
return {
|
||||
"last_suggestion_time": None,
|
||||
"seconds_since_last": 0,
|
||||
"cooldown_active": False,
|
||||
"cooldown_remaining": 0
|
||||
}
|
||||
|
||||
seconds_since = int(time.time() - last_time)
|
||||
cooldown_active = seconds_since < self.cooldown_seconds
|
||||
cooldown_remaining = max(0, self.cooldown_seconds - seconds_since)
|
||||
|
||||
return {
|
||||
"last_suggestion_time": last_time,
|
||||
"seconds_since_last": seconds_since,
|
||||
"cooldown_active": cooldown_active,
|
||||
"cooldown_remaining": cooldown_remaining
|
||||
}
|
||||
|
||||
|
||||
# Singleton instance
|
||||
_monitor_instance = None
|
||||
|
||||
|
||||
def get_proactive_monitor(min_priority: float = 0.6) -> ProactiveMonitor:
    """
    Return the process-wide ProactiveMonitor, creating it on first use.

    Args:
        min_priority: Priority threshold passed to the constructor; ignored
            on every call after the singleton has been created.

    Returns:
        The shared ProactiveMonitor instance.
    """
    global _monitor_instance

    if _monitor_instance is None:
        _monitor_instance = ProactiveMonitor(min_priority=min_priority)

    return _monitor_instance
1
cortex/autonomy/tools/__init__.py
Normal file
1
cortex/autonomy/tools/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Autonomous tool invocation system."""
|
||||
124
cortex/autonomy/tools/decision_engine.py
Normal file
124
cortex/autonomy/tools/decision_engine.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""
|
||||
Tool Decision Engine - decides which tools to invoke autonomously.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolDecisionEngine:
    """Decides which tools to invoke based on context analysis."""

    # Substring cues, matched against the lowercased user prompt.
    _MEMORY_CUES = (
        "remember", "you said", "we discussed", "earlier", "before",
        "last time", "previously", "what did",
    )
    _WEB_CUES = (
        "current", "latest", "news", "today", "what's happening",
        "look up", "search for", "find information", "recent",
    )
    _WEATHER_CUES = (
        "weather", "temperature", "forecast", "rain", "sunny", "climate",
    )
    _CODE_CUES = (
        "code", "function", "debug", "implement", "algorithm",
        "programming", "script", "syntax",
    )

    @staticmethod
    def _matches_any(text: str, cues) -> bool:
        """Return True when any cue substring appears in *text*."""
        return any(cue in text for cue in cues)

    async def analyze_tool_needs(
        self,
        user_prompt: str,
        monologue: Dict[str, Any],
        context_state: Dict[str, Any],
        available_tools: List[str]
    ) -> Dict[str, Any]:
        """
        Analyze if tools should be invoked and which ones.

        Args:
            user_prompt: User's message
            monologue: Inner monologue analysis (may be None/empty)
            context_state: Full context (currently unused; reserved)
            available_tools: List of available tool names

        Returns:
            {
                "should_invoke_tools": bool,
                "tools_to_invoke": [
                    {
                        "tool": "RAG | WEB | WEATHER | etc",
                        "query": "search query",
                        "reason": "why this tool",
                        "priority": 0.0-1.0
                    },
                    ...
                ],
                "confidence": 0.0-1.0  # highest priority among chosen tools
            }
        """
        # Lowercase once instead of per-check (was recomputed four times).
        prompt_lower = user_prompt.lower()
        tools_to_invoke = []

        # Memory/context lookup when the user references past conversation.
        if self._matches_any(prompt_lower, self._MEMORY_CUES):
            tools_to_invoke.append({
                "tool": "RAG",
                "query": user_prompt,
                "reason": "User references past conversation",
                "priority": 0.9
            })

        # Web search for fresh/current information.
        if self._matches_any(prompt_lower, self._WEB_CUES):
            tools_to_invoke.append({
                "tool": "WEB",
                "query": user_prompt,
                "reason": "Requires current information",
                "priority": 0.8
            })

        # Weather lookups.
        if self._matches_any(prompt_lower, self._WEATHER_CUES):
            tools_to_invoke.append({
                "tool": "WEATHER",
                "query": user_prompt,
                "reason": "Weather information requested",
                "priority": 0.95
            })

        # Code-related work, only when the CODEBRAIN tool is registered.
        if self._matches_any(prompt_lower, self._CODE_CUES) and "CODEBRAIN" in available_tools:
            tools_to_invoke.append({
                "tool": "CODEBRAIN",
                "query": user_prompt,
                "reason": "Code-related task",
                "priority": 0.85
            })

        # Proactive RAG: queries the monologue flags as complex benefit from
        # memory context even without explicit memory cues. (The unused
        # `intent` extraction from the original has been removed.)
        if monologue and monologue.get("consult_executive"):
            if not any(t["tool"] == "RAG" for t in tools_to_invoke):
                tools_to_invoke.append({
                    "tool": "RAG",
                    "query": user_prompt,
                    "reason": "Complex query benefits from context",
                    "priority": 0.6
                })

        # Highest-priority tools first; confidence mirrors the top priority.
        tools_to_invoke.sort(key=lambda t: t["priority"], reverse=True)
        confidence = tools_to_invoke[0]["priority"] if tools_to_invoke else 0.0

        result = {
            "should_invoke_tools": len(tools_to_invoke) > 0,
            "tools_to_invoke": tools_to_invoke,
            "confidence": confidence
        }

        if tools_to_invoke:
            logger.info(f"[TOOL_DECISION] Autonomous tool invocation recommended: {len(tools_to_invoke)} tools")
            for tool in tools_to_invoke:
                logger.info(f"  - {tool['tool']} (priority: {tool['priority']:.2f}): {tool['reason']}")

        return result
354
cortex/autonomy/tools/orchestrator.py
Normal file
354
cortex/autonomy/tools/orchestrator.py
Normal file
@@ -0,0 +1,354 @@
|
||||
"""
|
||||
Tool Orchestrator - executes autonomous tool invocations asynchronously.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Dict, List, Any, Optional
|
||||
import os
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolOrchestrator:
    """Orchestrates async tool execution and result aggregation."""

    def __init__(self, tool_timeout: int = 30):
        """
        Initialize orchestrator.

        Args:
            tool_timeout: Max seconds per tool call (default 30)
        """
        self.tool_timeout = tool_timeout
        self.available_tools = self._discover_tools()

    def _discover_tools(self) -> Dict[str, Any]:
        """
        Discover available tool modules.

        Each integration is optional: a failed import simply leaves that
        tool out of the registry rather than breaking startup.
        """
        tools = {}

        try:
            from memory.neomem_client import search_neomem
            tools["RAG"] = search_neomem
            logger.debug("[ORCHESTRATOR] RAG tool available")
        except ImportError:
            logger.debug("[ORCHESTRATOR] RAG tool not available")

        try:
            from integrations.web_search import web_search
            tools["WEB"] = web_search
            logger.debug("[ORCHESTRATOR] WEB tool available")
        except ImportError:
            logger.debug("[ORCHESTRATOR] WEB tool not available")

        try:
            from integrations.weather import get_weather
            tools["WEATHER"] = get_weather
            logger.debug("[ORCHESTRATOR] WEATHER tool available")
        except ImportError:
            logger.debug("[ORCHESTRATOR] WEATHER tool not available")

        try:
            from integrations.codebrain import query_codebrain
            tools["CODEBRAIN"] = query_codebrain
            logger.debug("[ORCHESTRATOR] CODEBRAIN tool available")
        except ImportError:
            logger.debug("[ORCHESTRATOR] CODEBRAIN tool not available")

        return tools

    async def execute_tools(
        self,
        tools_to_invoke: List[Dict[str, Any]],
        context_state: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute multiple tools asynchronously.

        Args:
            tools_to_invoke: List of tool specs from decision engine
                [{"tool": "RAG", "query": "...", "reason": "...", "priority": 0.9}, ...]
            context_state: Full context for tool execution

        Returns:
            {
                "results": {"RAG": {...}, "WEB": {...}, ...},
                "execution_summary": {
                    "tools_invoked": ["RAG", "WEB"],
                    "successful": ["RAG"],
                    "failed": ["WEB"],
                    "total_time_ms": 1234
                }
            }
        """
        import time
        start_time = time.time()

        logger.info(f"[ORCHESTRATOR] Executing {len(tools_to_invoke)} tools asynchronously")

        # Build one coroutine per requested tool; unregistered tools are skipped.
        tasks = []
        tool_names = []

        for tool_spec in tools_to_invoke:
            tool_name = tool_spec["tool"]
            query = tool_spec["query"]

            if tool_name in self.available_tools:
                task = self._execute_single_tool(tool_name, query, context_state)
                tasks.append(task)
                tool_names.append(tool_name)
                logger.debug(f"[ORCHESTRATOR] Queued {tool_name}: {query[:50]}...")
            else:
                logger.warning(f"[ORCHESTRATOR] Tool {tool_name} not available, skipping")

        results = {}
        successful = []
        failed = []

        if tasks:
            try:
                # All tools run concurrently under a single global timeout.
                # return_exceptions=True keeps one failure from cancelling the rest.
                completed = await asyncio.wait_for(
                    asyncio.gather(*tasks, return_exceptions=True),
                    timeout=self.tool_timeout
                )

                for tool_name, result in zip(tool_names, completed):
                    if isinstance(result, Exception):
                        logger.error(f"[ORCHESTRATOR] {tool_name} failed: {result}")
                        results[tool_name] = {"error": str(result), "success": False}
                        failed.append(tool_name)
                    else:
                        logger.info(f"[ORCHESTRATOR] {tool_name} completed successfully")
                        results[tool_name] = result
                        successful.append(tool_name)

            except asyncio.TimeoutError:
                # Mark every tool that produced no result as timed out.
                logger.error(f"[ORCHESTRATOR] Global timeout ({self.tool_timeout}s) exceeded")
                for tool_name in tool_names:
                    if tool_name not in results:
                        results[tool_name] = {"error": "timeout", "success": False}
                        failed.append(tool_name)

        total_time_ms = int((time.time() - start_time) * 1000)

        execution_summary = {
            "tools_invoked": tool_names,
            "successful": successful,
            "failed": failed,
            "total_time_ms": total_time_ms
        }

        logger.info(f"[ORCHESTRATOR] Execution complete: {len(successful)}/{len(tool_names)} successful in {total_time_ms}ms")

        return {
            "results": results,
            "execution_summary": execution_summary
        }

    async def _execute_single_tool(
        self,
        tool_name: str,
        query: str,
        context_state: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute a single tool with error handling.

        Args:
            tool_name: Name of tool (RAG, WEB, etc.)
            query: Query string for the tool
            context_state: Context for tool execution

        Returns:
            {"success": True, "tool": ..., "query": ..., "data": ...}

        Raises:
            ValueError: If the tool is not registered.
            Exception: Re-raises whatever the underlying tool raised, so
                gather(return_exceptions=True) can record the failure.
        """
        tool_func = self.available_tools.get(tool_name)
        if not tool_func:
            raise ValueError(f"Tool {tool_name} not available")

        try:
            logger.debug(f"[ORCHESTRATOR] Invoking {tool_name}...")

            # Different tools have different signatures - adapt as needed
            if tool_name == "RAG":
                result = await self._invoke_rag(tool_func, query, context_state)
            elif tool_name == "WEB":
                result = await self._invoke_web(tool_func, query)
            elif tool_name == "WEATHER":
                result = await self._invoke_weather(tool_func, query)
            elif tool_name == "CODEBRAIN":
                result = await self._invoke_codebrain(tool_func, query, context_state)
            else:
                # Generic invocation
                result = await tool_func(query)

            return {
                "success": True,
                "tool": tool_name,
                "query": query,
                "data": result
            }

        except Exception as e:
            logger.error(f"[ORCHESTRATOR] {tool_name} execution failed: {e}")
            raise

    async def _invoke_rag(self, func, query: str, context: Dict[str, Any]) -> Any:
        """Invoke RAG tool (NeoMem search); degrades to an empty result on failure."""
        session_id = context.get("session_id", "unknown")
        # RAG searches memory for relevant past interactions
        try:
            results = await func(query, limit=5, session_id=session_id)
            return results
        except Exception as e:
            logger.warning(f"[ORCHESTRATOR] RAG invocation failed, returning empty: {e}")
            return []

    async def _invoke_web(self, func, query: str) -> Any:
        """Invoke web search tool; returns an error dict on failure."""
        try:
            results = await func(query, max_results=5)
            return results
        except Exception as e:
            logger.warning(f"[ORCHESTRATOR] WEB invocation failed: {e}")
            return {"error": str(e), "results": []}

    async def _invoke_weather(self, func, query: str) -> Any:
        """Invoke weather tool; location is extracted heuristically from the query."""
        # Extract location from query (simple heuristic)
        # In future: use LLM to extract location
        try:
            location = self._extract_location(query)
            results = await func(location)
            return results
        except Exception as e:
            logger.warning(f"[ORCHESTRATOR] WEATHER invocation failed: {e}")
            return {"error": str(e)}

    async def _invoke_codebrain(self, func, query: str, context: Dict[str, Any]) -> Any:
        """Invoke codebrain tool; returns an error dict on failure."""
        try:
            results = await func(query, context=context)
            return results
        except Exception as e:
            logger.warning(f"[ORCHESTRATOR] CODEBRAIN invocation failed: {e}")
            return {"error": str(e)}

    def _extract_location(self, query: str) -> str:
        """
        Extract location from weather query.

        Simple heuristic (first word after an indicator such as "in ") -
        in future use LLM.

        Args:
            query: Raw user query (any case).

        Returns:
            Lowercased best-guess location token, or "current location"
            when no usable location follows an indicator.
        """
        # Common location indicators
        indicators = ["in ", "at ", "for ", "weather in ", "temperature in "]

        query_lower = query.lower()
        for indicator in indicators:
            if indicator in query_lower:
                # Get text after indicator
                parts = query_lower.split(indicator, 1)
                tail = parts[1].strip() if len(parts) > 1 else ""
                # BUGFIX: the original indexed tail.split()[0] unguarded and
                # raised IndexError when the indicator ended the query
                # (e.g. "weather in "). Fall through to the default instead.
                if tail:
                    return tail.split()[0]  # First word after indicator

        # Default fallback
        return "current location"

    def format_results_for_context(self, orchestrator_result: Dict[str, Any]) -> str:
        """
        Format tool results for inclusion in context/prompt.

        Args:
            orchestrator_result: Output from execute_tools()

        Returns:
            Formatted string for prompt injection ("" when there are no results).
        """
        results = orchestrator_result.get("results", {})
        summary = orchestrator_result.get("execution_summary", {})

        if not results:
            return ""

        formatted = "\n=== AUTONOMOUS TOOL RESULTS ===\n"

        for tool_name, tool_result in results.items():
            if tool_result.get("success", False):
                formatted += f"\n[{tool_name}]\n"
                data = tool_result.get("data", {})

                # Format based on tool type
                if tool_name == "RAG":
                    formatted += self._format_rag_results(data)
                elif tool_name == "WEB":
                    formatted += self._format_web_results(data)
                elif tool_name == "WEATHER":
                    formatted += self._format_weather_results(data)
                elif tool_name == "CODEBRAIN":
                    formatted += self._format_codebrain_results(data)
                else:
                    formatted += f"{data}\n"
            else:
                formatted += f"\n[{tool_name}] - Failed: {tool_result.get('error', 'unknown')}\n"

        formatted += f"\n(Tools executed in {summary.get('total_time_ms', 0)}ms)\n"
        formatted += "=" * 40 + "\n"

        return formatted

    def _format_rag_results(self, data: Any) -> str:
        """Format RAG/memory search results (top 3, truncated to 100 chars each)."""
        if not data:
            return "No relevant memories found.\n"

        formatted = "Relevant memories:\n"
        for i, item in enumerate(data[:3], 1):  # Top 3
            text = item.get("text", item.get("content", str(item)))
            formatted += f"  {i}. {text[:100]}...\n"
        return formatted

    def _format_web_results(self, data: Any) -> str:
        """Format web search results (top 3 with title + truncated snippet)."""
        if isinstance(data, dict) and data.get("error"):
            return f"Web search failed: {data['error']}\n"

        results = data.get("results", []) if isinstance(data, dict) else data
        if not results:
            return "No web results found.\n"

        formatted = "Web search results:\n"
        for i, item in enumerate(results[:3], 1):  # Top 3
            title = item.get("title", "No title")
            snippet = item.get("snippet", item.get("description", ""))
            formatted += f"  {i}. {title}\n     {snippet[:100]}...\n"
        return formatted

    def _format_weather_results(self, data: Any) -> str:
        """Format weather results as a single summary line."""
        if isinstance(data, dict) and data.get("error"):
            return f"Weather lookup failed: {data['error']}\n"

        # Assuming weather API returns temp, conditions, etc.
        temp = data.get("temperature", "unknown")
        conditions = data.get("conditions", "unknown")
        location = data.get("location", "requested location")

        return f"Weather for {location}: {temp}, {conditions}\n"

    def _format_codebrain_results(self, data: Any) -> str:
        """Format codebrain results (raw passthrough unless an error dict)."""
        if isinstance(data, dict) and data.get("error"):
            return f"Codebrain failed: {data['error']}\n"

        # Format code-related results
        return f"{data}\n"
Reference in New Issue
Block a user