Cortex rework in progress

This commit is contained in:
serversdwn
2025-11-26 18:01:48 -05:00
parent a087de9790
commit 734999e8bb
8 changed files with 468 additions and 593 deletions


@@ -1,42 +1,57 @@
# reflection.py
import json
import os
import re

from llm.llm_router import call_llm


async def reflect_notes(intake_summary: str, identity_block: dict | None) -> dict:
"""
Generate reflection notes (internal guidance) for the reasoning engine.
These notes help simulate continuity and identity without being shown to the user.
Produce short internal reflection notes for Cortex.
These are NOT shown to the user.
"""
    # -----------------------------
    # Build the prompt
    # -----------------------------
    identity_text = ""
    if identity_block:
        identity_text = f"Identity:\n{identity_block}\n\n"

    prompt = (
        f"{identity_text}"
        f"Recent summary:\n{intake_summary}\n\n"
        "You are Lyra's meta-awareness layer. Your job is to produce short, directive "
        "internal notes that guide Lyra's reasoning engine. These notes are NEVER "
        "shown to the user.\n\n"
        "Rules for output:\n"
        "1. Return ONLY valid JSON.\n"
        "2. JSON must have exactly one key: \"notes\".\n"
        "3. \"notes\" must be a list of 3 to 6 short strings.\n"
        "4. Notes must be actionable (e.g., \"keep it concise\", \"maintain context\").\n"
        "5. No markdown, no apologies, no explanations.\n\n"
        "Return JSON:\n"
        "{ \"notes\": [\"...\"] }\n"
    )

    # -----------------------------
    # Module-specific backend choice
    # -----------------------------
    reflection_backend = os.getenv("REFLECTION_LLM")
    cortex_backend = os.getenv("CORTEX_LLM", "PRIMARY").upper()

    # Reflection uses its own backend if set; otherwise fall back to the Cortex backend.
    backend = (reflection_backend or cortex_backend).upper()
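
    # Example (hypothetical) environment configuration; REFLECTION_LLM and CORTEX_LLM
    # are the variables read above, and the backend names below are illustrative only:
    #   CORTEX_LLM=primary        -> Cortex modules resolve to the "PRIMARY" backend
    #   REFLECTION_LLM=secondary  -> reflection alone overrides to "SECONDARY"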

    # -----------------------------
    # Call the selected LLM backend
    # -----------------------------
    raw = await call_llm(prompt, backend=backend)
    print("[Reflection-Raw]:", raw)

    # -----------------------------
    # Try direct JSON
    # -----------------------------
    try:
        parsed = json.loads(raw.strip())
        if isinstance(parsed, dict) and "notes" in parsed:
            return parsed
@@ -44,10 +59,11 @@ async def reflect_notes(intake_summary: str, identity_block: dict | None) -> dic
    except Exception:
        pass

    # -----------------------------
    # Try JSON extraction from the surrounding text
    # -----------------------------
    try:
        # Non-greedy match: grab the first {...} block embedded in the response
        match = re.search(r"\{.*?\}", raw, re.S)
        if match:
            parsed = json.loads(match.group(0))
            if isinstance(parsed, dict) and "notes" in parsed:
                return parsed
@@ -55,5 +71,7 @@ async def reflect_notes(intake_summary: str, identity_block: dict | None) -> dic
    except Exception:
        pass

    # -----------------------------
    # Fallback: treat the raw text as a single note
    # -----------------------------
    return {"notes": [raw.strip()]}
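
A minimal usage sketch (hypothetical, not part of this commit), assuming the file is importable as a module named reflection and that call_llm can reach the configured backend:

# example_usage.py (illustration only)
import asyncio

from reflection import reflect_notes

async def main():
    identity = {"name": "Lyra", "role": "assistant"}  # illustrative identity block
    result = await reflect_notes("User asked for shorter, more direct answers.", identity)
    # Expected shape: {"notes": ["...", ...]}; on parse failure the raw reply
    # comes back as a single note.
    print(result["notes"])

asyncio.run(main())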