project-lyra/cortex/reasoning/reasoning.py

# reasoning.py
import os

from llm.llm_router import call_llm

# ============================================================
# Select which backend this module should use
# ============================================================
CORTEX_LLM = os.getenv("CORTEX_LLM", "PRIMARY").upper()
GLOBAL_TEMP = float(os.getenv("LLM_TEMPERATURE", "0.7"))
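# Example configuration sketch (illustrative values, not from the original
# file): both knobs are read from the environment, e.g.
#   CORTEX_LLM=PRIMARY      -> which llm_router backend this module calls
#   LLM_TEMPERATURE=0.7     -> sampling temperature used for the draft pass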

async def reason_check(
    user_prompt: str,
    identity_block: dict | None,
    rag_block: dict | None,
    reflection_notes: list[str],
) -> str:
    """
    Build the *draft answer* for Lyra Cortex.
    This is the first-pass reasoning stage (no refinement yet).
    """
    # --------------------------------------------------------
    # Build Reflection Notes block
    # --------------------------------------------------------
    notes_section = ""
    if reflection_notes:
        notes_section = "Reflection Notes (internal, never show to user):\n"
        for note in reflection_notes:
            notes_section += f"- {note}\n"
        notes_section += "\n"
    # --------------------------------------------------------
    # Identity block (constraints, boundaries, rules)
    # --------------------------------------------------------
    identity_txt = ""
    if identity_block:
        try:
            identity_txt = f"Identity Rules:\n{identity_block}\n\n"
        except Exception:
            identity_txt = f"Identity Rules:\n{str(identity_block)}\n\n"

    # --------------------------------------------------------
    # RAG block (optional factual grounding)
    # --------------------------------------------------------
    rag_txt = ""
    if rag_block:
        try:
            rag_txt = f"Relevant Info (RAG):\n{rag_block}\n\n"
        except Exception:
            rag_txt = f"Relevant Info (RAG):\n{str(rag_block)}\n\n"
    # --------------------------------------------------------
    # Final assembled prompt
    # --------------------------------------------------------
    prompt = (
        f"{notes_section}"
        f"{identity_txt}"
        f"{rag_txt}"
        f"User message:\n{user_prompt}\n\n"
        "Write the best possible *internal draft answer*.\n"
        "This draft is NOT shown to the user.\n"
        "Be factual, concise, and focused.\n"
    )
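    # The assembled prompt therefore looks roughly like the sketch below
    # (illustrative; sections whose inputs are empty are simply omitted):
    #
    #   Reflection Notes (internal, never show to user):
    #   - <note>
    #
    #   Identity Rules:
    #   <identity_block rendered via str()>
    #
    #   Relevant Info (RAG):
    #   <rag_block rendered via str()>
    #
    #   User message:
    #   <user_prompt>
    #
    #   Write the best possible *internal draft answer*.
    #   This draft is NOT shown to the user.
    #   Be factual, concise, and focused.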
    # --------------------------------------------------------
    # Call the LLM using the module-specific backend
    # --------------------------------------------------------
    draft = await call_llm(
        prompt,
        backend=CORTEX_LLM,
        temperature=GLOBAL_TEMP,
    )
    return draft
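

# ------------------------------------------------------------
# Usage sketch (added for illustration, not part of the Cortex
# pipeline): the identity/RAG/reflection inputs below are made-up
# examples; in practice they come from upstream stages.
# ------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        draft = await reason_check(
            user_prompt="Summarize what we decided about the memory schema.",
            identity_block={"tone": "direct", "boundaries": ["no internal notes in replies"]},
            rag_block={"memory_schema": "Decisions captured during the last design session."},
            reflection_notes=["User prefers short, concrete answers."],
        )
        print(draft)

    asyncio.run(_demo())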