Cortex rework in progress
@@ -1,33 +1,76 @@
 # reasoning.py
 import os
 from llm.llm_router import call_llm
 
-async def reason_check(user_prompt: str,
-                       identity_block: dict | None,
-                       rag_block: dict | None,
-                       reflection_notes: list[str]) -> str:
+# ============================================================
+# Select which backend this module should use
+# ============================================================
+CORTEX_LLM = os.getenv("CORTEX_LLM", "PRIMARY").upper()
+GLOBAL_TEMP = float(os.getenv("LLM_TEMPERATURE", "0.7"))
+
+
+async def reason_check(
+    user_prompt: str,
+    identity_block: dict | None,
+    rag_block: dict | None,
+    reflection_notes: list[str]
+) -> str:
     """
-    Generate a first draft using identity, RAG, and reflection notes.
-    No critique loop yet.
+    Build the *draft answer* for Lyra Cortex.
+    This is the first-pass reasoning stage (no refinement yet).
     """
 
-    # Build internal notes section
+    # --------------------------------------------------------
+    # Build Reflection Notes block
+    # --------------------------------------------------------
     notes_section = ""
     if reflection_notes:
-        notes_section = "Reflection Notes (internal, do NOT show to user):\n"
-        for n in reflection_notes:
-            notes_section += f"- {n}\n"
+        notes_section = "Reflection Notes (internal, never show to user):\n"
+        for note in reflection_notes:
+            notes_section += f"- {note}\n"
+        notes_section += "\n"
 
-    identity_txt = f"Identity: {identity_block}\n\n" if identity_block else ""
-    rag_txt = f"Relevant info: {rag_block}\n\n" if rag_block else ""
+    # --------------------------------------------------------
+    # Identity block (constraints, boundaries, rules)
+    # --------------------------------------------------------
+    identity_txt = ""
+    if identity_block:
+        try:
+            identity_txt = f"Identity Rules:\n{identity_block}\n\n"
+        except Exception:
+            identity_txt = f"Identity Rules:\n{str(identity_block)}\n\n"
+
+    # --------------------------------------------------------
+    # RAG block (optional factual grounding)
+    # --------------------------------------------------------
+    rag_txt = ""
+    if rag_block:
+        try:
+            rag_txt = f"Relevant Info (RAG):\n{rag_block}\n\n"
+        except Exception:
+            rag_txt = f"Relevant Info (RAG):\n{str(rag_block)}\n\n"
 
+    # --------------------------------------------------------
+    # Final assembled prompt
+    # --------------------------------------------------------
     prompt = (
         f"{notes_section}"
         f"{identity_txt}"
         f"{rag_txt}"
-        f"User said:\n{user_prompt}\n\n"
-        "Draft the best possible internal answer."
+        f"User message:\n{user_prompt}\n\n"
+        "Write the best possible *internal draft answer*.\n"
+        "This draft is NOT shown to the user.\n"
+        "Be factual, concise, and focused.\n"
     )
 
-    draft = await call_llm(prompt)
+    # --------------------------------------------------------
+    # Call the LLM using the module-specific backend
+    # --------------------------------------------------------
+    draft = await call_llm(
+        prompt,
+        backend=CORTEX_LLM,
+        temperature=GLOBAL_TEMP,
+    )
     return draft
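For context, a minimal sketch of how the reworked function might be exercised. This harness is not part of the commit: the module path (importing `reasoning` directly), the example arguments, and the env-var values are assumptions; it also presumes the project's `llm.llm_router.call_llm` backend is available and accepts the `backend` and `temperature` keyword arguments shown in the diff.

# usage_sketch.py — hypothetical test harness, not part of this commit
import asyncio
import os

# CORTEX_LLM and LLM_TEMPERATURE are read at import time, so set them first (assumption).
os.environ.setdefault("CORTEX_LLM", "PRIMARY")
os.environ.setdefault("LLM_TEMPERATURE", "0.4")

from reasoning import reason_check  # the module touched by this commit

async def main() -> None:
    # Example inputs are illustrative only.
    draft = await reason_check(
        user_prompt="Summarize yesterday's meeting notes.",
        identity_block={"persona": "Lyra", "boundaries": ["no speculation"]},
        rag_block={"notes": "Meeting covered Cortex rework milestones."},
        reflection_notes=["User prefers short answers."],
    )
    print(draft)  # internal draft only; not shown to the end user

if __name__ == "__main__":
    asyncio.run(main())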
||||