v0.5.2 - fixed: LLM router async handling, relay-UI mismatch, and intake summarization failure, among others.
Memory relevance threshold increased.
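For context, a memory relevance threshold typically gates which stored memories get injected into the prompt: only entries whose similarity score meets the cutoff are kept, so raising it makes recall stricter. A minimal sketch of that pattern; MEMORY_RELEVANCE_THRESHOLD, the 0.75 value, and scored_memories are illustrative placeholders, not taken from this repo:

# Illustrative only: the project's actual constant name, value, and
# memory-store API are not shown in this commit.
MEMORY_RELEVANCE_THRESHOLD = 0.75  # placeholder; v0.5.2 raised the real cutoff

def relevant_memories(scored_memories):
    """Keep only (text, score) pairs at or above the relevance cutoff."""
    return [text for text, score in scored_memories
            if score >= MEMORY_RELEVANCE_THRESHOLD]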
@@ -282,11 +282,17 @@ JSON only. No text outside JSON.
try:
    llm_response = await call_llm(
        prompt,
        backend=INTAKE_LLM,
        temperature=0.2
    )

    print(f"[Intake] LLM response length: {len(llm_response) if llm_response else 0}")
    print(f"[Intake] LLM response preview: {llm_response[:200] if llm_response else '(empty)'}")

    # LLM should return JSON, parse it
    if not llm_response or not llm_response.strip():
        raise ValueError("Empty response from LLM")

    summary = json.loads(llm_response)

    return {
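The hunk ends inside the try block, so the matching except side is not shown. As a hedged sketch only, the failure path that the commit message ("intake summarization failure") refers to might look like the following; the log message and the fallback return shape are assumptions, not code from this commit:

except ValueError as exc:
    # Hypothetical handler: the diff cuts off before the real one.
    # json.JSONDecodeError subclasses ValueError, so this catches both the
    # empty-response guard above and a failed json.loads() parse.
    print(f"[Intake] Summarization failed: {exc}")
    return {"summary": "", "error": str(exc)}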