# reflection.py
"""Reflection layer: produces internal guidance notes for the reasoning engine."""

import json
import os
import re

from llm.llm_router import call_llm

# Greedy span from the first '{' to the last '}': a non-greedy match would stop
# at the first '}' and truncate any JSON object that contains nested braces.
_JSON_RE = re.compile(r"\{.*\}", re.S)


def _parse_notes(raw: str) -> dict | None:
    """Best-effort extraction of a {"notes": [...]} payload from an LLM reply.

    Tries the whole (stripped) reply first, then a brace-delimited substring
    for replies that wrap the JSON in extra prose.

    Returns:
        The parsed dict when it matches the contract (a dict whose "notes"
        value is a list), otherwise None.
    """
    candidates = [raw.strip()]
    match = _JSON_RE.search(raw)
    if match:
        candidates.append(match.group(0))
    for candidate in candidates:
        try:
            parsed = json.loads(candidate)
        except json.JSONDecodeError:
            # Only malformed JSON is expected here; anything else should surface.
            continue
        # Enforce the prompt contract: "notes" must hold a list, not any value.
        if isinstance(parsed, dict) and isinstance(parsed.get("notes"), list):
            return parsed
    return None


async def reflect_notes(intake_summary: str, identity_block: dict | None) -> dict:
    """
    Generate reflection notes (internal guidance) for the reasoning engine.

    These notes help simulate continuity and identity without being shown
    to the user.

    Args:
        intake_summary: Recent conversation summary interpolated into the prompt.
        identity_block: Optional identity metadata interpolated into the prompt.

    Returns:
        A dict with a "notes" key holding a list. When the LLM reply cannot
        be parsed, falls back to {"notes": [<raw reply, stripped>]} so callers
        always receive the expected shape.
    """
    identity_text = f"Identity:\n{identity_block}\n\n" if identity_block else ""
    prompt = (
        f"{identity_text}"
        f"Recent summary:\n{intake_summary}\n\n"
        "You are Lyra's meta-awareness layer. Your job is to produce short, directive "
        "internal notes that guide Lyra’s reasoning engine. These notes are NEVER "
        "shown to the user.\n\n"
        "Rules for output:\n"
        "1. Return ONLY valid JSON.\n"
        "2. JSON must have exactly one key: \"notes\".\n"
        "3. \"notes\" must be a list of 3 to 6 short strings.\n"
        "4. Notes must be actionable (e.g., \"keep it concise\", \"maintain context\").\n"
        "5. No markdown, no apologies, no explanations.\n\n"
        "Return JSON:\n"
        "{ \"notes\": [\"...\"] }\n"
    )
    # Allow ops to pin a backend without code changes; "primary" is the default.
    backend = os.getenv("LLM_FORCE_BACKEND", "primary")
    raw = await call_llm(prompt, backend=backend)
    print("[Reflection-Raw]:", raw)

    parsed = _parse_notes(raw)
    if parsed is not None:
        return parsed
    # Final fallback: preserve the raw reply so downstream code still gets notes.
    return {"notes": [raw.strip()]}