v0.5.2 - fixed: LLM router async handling, relay-UI mismatch, intake summarization failure, among others.

Memory relevance threshold increased.
This commit is contained in:
serversdwn
2025-12-12 02:58:23 -05:00
parent 6a20d3981f
commit fe86759cfd
4 changed files with 106 additions and 14 deletions

View File

@@ -282,11 +282,17 @@ JSON only. No text outside JSON.
try:
llm_response = await call_llm(
prompt,
backend=INTAKE_LLM,
temperature=0.2
)
print(f"[Intake] LLM response length: {len(llm_response) if llm_response else 0}")
print(f"[Intake] LLM response preview: {llm_response[:200] if llm_response else '(empty)'}")
# LLM should return JSON, parse it
if not llm_response or not llm_response.strip():
raise ValueError("Empty response from LLM")
summary = json.loads(llm_response)
return {