autonomy scaffold

autonomy/monologue/monologue.py (new file, 40 lines added)

@@ -0,0 +1,40 @@
from typing import Dict
from llm.llm_router import call_llm


MONOLOGUE_SYSTEM_PROMPT = """
You are Lyra's inner monologue.
You think privately.
You do NOT speak to the user.
You do NOT solve the task.
You only reflect on intent, tone, and depth.

Return ONLY valid JSON with:
- intent (string)
- tone (neutral | warm | focused | playful | direct)
- depth (short | medium | deep)
- consult_executive (true | false)
"""


class InnerMonologue:
    """Private reflection pass: reads the user message and self state,
    then returns routing hints (intent, tone, depth, consult_executive)."""

    async def process(self, context: Dict) -> Dict:
        prompt = f"""
User message:
{context['user_message']}

Self state:
{context['self_state']}

Context summary:
{context['context_summary']}
"""

        result = await call_llm(
            provider="mi50",  # MythoMax lives here
            model="mythomax",
            system_prompt=MONOLOGUE_SYSTEM_PROMPT,
            user_prompt=prompt,
            temperature=0.7,
            max_tokens=200,
        )

        return result  # must already be JSON
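
A minimal usage sketch, not part of this commit: it assumes call_llm() already returns the parsed dict described in MONOLOGUE_SYSTEM_PROMPT (per the "must already be JSON" comment), that the module is importable as autonomy.monologue.monologue, and the context values below are invented for illustration.

import asyncio

from autonomy.monologue.monologue import InnerMonologue


async def main():
    # Build the context dict with the keys the prompt template expects.
    reflection = await InnerMonologue().process({
        "user_message": "Can you help me plan tomorrow?",
        "self_state": "calm, low load",
        "context_summary": "First exchange of the session.",
    })
    # Expected shape per the system prompt, e.g.
    # {"intent": "...", "tone": "focused", "depth": "short", "consult_executive": False}
    print(reflection)


asyncio.run(main())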
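
The system prompt also defines a small response contract. A hedged validation sketch (the helper name, placement, and error handling are assumptions, not part of this commit) could enforce it before the result is routed further:

ALLOWED_TONES = {"neutral", "warm", "focused", "playful", "direct"}
ALLOWED_DEPTHS = {"short", "medium", "deep"}


def validate_reflection(result: dict) -> dict:
    """Check a monologue result against the contract in MONOLOGUE_SYSTEM_PROMPT."""
    if not isinstance(result.get("intent"), str):
        raise ValueError("intent must be a string")
    if result.get("tone") not in ALLOWED_TONES:
        raise ValueError(f"unexpected tone: {result.get('tone')!r}")
    if result.get("depth") not in ALLOWED_DEPTHS:
        raise ValueError(f"unexpected depth: {result.get('depth')!r}")
    if not isinstance(result.get("consult_executive"), bool):
        raise ValueError("consult_executive must be a boolean")
    return result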