autonomy scaffold

This commit is contained in:
serversdwn
2025-12-13 02:55:49 -05:00
parent fe86759cfd
commit 8554249421
5 changed files with 117 additions and 146 deletions

View File

@@ -0,0 +1,40 @@
from typing import Dict
from llm.llm_router import call_llm
# System prompt for the inner-monologue model. It constrains the model to
# private reflection only (no user-facing answer, no task solving) and to a
# strict JSON reply with exactly these fields: intent, tone, depth,
# consult_executive. Do not edit the string casually — the downstream code
# at InnerMonologue.process assumes the model returns this JSON shape.
MONOLOGUE_SYSTEM_PROMPT = """
You are Lyra's inner monologue.
You think privately.
You do NOT speak to the user.
You do NOT solve the task.
You only reflect on intent, tone, and depth.
Return ONLY valid JSON with:
- intent (string)
- tone (neutral | warm | focused | playful | direct)
- depth (short | medium | deep)
- consult_executive (true | false)
"""
class InnerMonologue:
    """Private reflection layer.

    Classifies a user message (intent / tone / depth / whether to consult
    the executive) without ever producing a user-facing answer.
    """

    async def process(self, context: Dict) -> Dict:
        """Run the monologue model over *context* and return its verdict.

        ``context`` must carry the keys ``user_message``, ``self_state``
        and ``context_summary`` (a ``KeyError`` is raised otherwise).
        Returns whatever the router hands back — presumably already
        parsed JSON matching MONOLOGUE_SYSTEM_PROMPT's schema; verify
        against call_llm's contract.
        """
        user_message = context['user_message']
        self_state = context['self_state']
        context_summary = context['context_summary']

        # Assemble the reflection prompt; byte-identical to the original
        # f-string layout (leading newline, one section per line).
        reflection_prompt = (
            "\n"
            "User message:\n"
            f"{user_message}\n"
            "Self state:\n"
            f"{self_state}\n"
            "Context summary:\n"
            f"{context_summary}\n"
        )

        verdict = await call_llm(
            provider="mi50",  # MythoMax lives here
            model="mythomax",
            system_prompt=MONOLOGUE_SYSTEM_PROMPT,
            user_prompt=reflection_prompt,
            temperature=0.7,
            max_tokens=200,
        )
        return verdict  # must already be JSON