Cortex debugging logs cleaned up
This commit is contained in:
@@ -38,6 +38,8 @@ async function tryBackend(backend, messages) {
|
||||
|
||||
// 🧩 Normalize replies
|
||||
let reply = "";
|
||||
let parsedData = null;
|
||||
|
||||
try {
|
||||
if (isOllama) {
|
||||
// Ollama sometimes returns NDJSON lines; merge them
|
||||
@@ -49,21 +51,75 @@ async function tryBackend(backend, messages) {
|
||||
.join("");
|
||||
reply = merged.trim();
|
||||
} else {
|
||||
const data = JSON.parse(raw);
|
||||
console.log("🔍 RAW LLM RESPONSE:", JSON.stringify(data, null, 2));
|
||||
parsedData = JSON.parse(raw);
|
||||
reply =
|
||||
data?.choices?.[0]?.text?.trim() ||
|
||||
data?.choices?.[0]?.message?.content?.trim() ||
|
||||
data?.message?.content?.trim() ||
|
||||
parsedData?.choices?.[0]?.text?.trim() ||
|
||||
parsedData?.choices?.[0]?.message?.content?.trim() ||
|
||||
parsedData?.message?.content?.trim() ||
|
||||
"";
|
||||
|
||||
|
||||
}
|
||||
} catch (err) {
|
||||
reply = `[parse error: ${err.message}]`;
|
||||
}
|
||||
|
||||
return { reply, raw, backend: backend.key };
|
||||
return { reply, raw, parsedData, backend: backend.key };
|
||||
}
|
||||
|
||||
// ------------------------------------
// Structured logging helper
// ------------------------------------
// Verbosity is fixed once at module load from the environment.
const LOG_DETAIL = process.env.LOG_DETAIL_LEVEL || "summary"; // minimal | summary | detailed | verbose

/**
 * Log a single LLM backend call according to LOG_DETAIL.
 *
 * @param {{key: string}} backend - Backend descriptor; only `key` is read here.
 * @param {Array<{content?: string}>} messages - Chat messages; the last entry's
 *   `content` is shown as the prompt preview in detailed/verbose mode.
 * @param {{reply?: string, parsedData?: object}|null} result - Successful call
 *   result from tryBackend; `parsedData` (when present) is dumped in verbose mode.
 * @param {Error|null} [error] - When set, the call failed; errors are always
 *   logged regardless of LOG_DETAIL.
 * @returns {void}
 */
function logLLMCall(backend, messages, result, error = null) {
  // HH:MM:SS.mmm — date portion and trailing 'Z' stripped from the ISO string.
  const timestamp = new Date().toISOString().split("T")[1].slice(0, -1);
  const label = backend.key.toUpperCase();

  if (error) {
    // Errors are always logged, even in minimal mode.
    console.warn(`⚠️ [LLM] ${label} failed | ${timestamp} | ${error.message}`);
    return;
  }

  // Success — gate on detail level.
  if (LOG_DETAIL === "minimal") {
    return; // successful calls are silent in minimal mode
  }

  // Truncate to `max` chars; append "..." only when text was actually cut
  // (the previous version appended it unconditionally, which was misleading).
  const preview = (text, max) => {
    const s = text ?? "";
    return s.length > max ? `${s.substring(0, max)}...` : s;
  };

  if (LOG_DETAIL === "summary") {
    console.log(`✅ [LLM] ${label} | ${timestamp} | Reply: ${preview(result.reply, 80)}`);
    return;
  }

  // Detailed or verbose
  const rule = "─".repeat(100);
  console.log(`\n${rule}`);
  console.log(`🧠 LLM CALL | Backend: ${label} | ${timestamp}`);
  console.log(rule);

  // Prompt preview: last message is assumed to be the user's current turn
  // (matches how callSpeechLLM builds the message list) — TODO confirm.
  const lastMsg = messages[messages.length - 1];
  console.log(`📝 Prompt: ${preview(lastMsg?.content, 150)}`);

  // Parsed reply preview.
  console.log(`💬 Reply: ${preview(result.reply, 200)}`);

  // Full (pretty-printed) raw response only in verbose mode.
  if (LOG_DETAIL === "verbose" && result.parsedData) {
    console.log(`\n╭─ RAW RESPONSE ────────────────────────────────────────────────────────────────────────────`);
    const jsonStr = JSON.stringify(result.parsedData, null, 2);
    const lines = jsonStr.split("\n");
    const maxLines = 50; // cap so huge responses don't flood the terminal

    for (const line of lines.slice(0, maxLines)) {
      console.log(`│ ${line}`);
    }

    if (lines.length > maxLines) {
      console.log(`│ ... (${lines.length - maxLines} more lines - check raw field for full response)`);
    }
    console.log(`╰${"─".repeat(95)}`);
  }

  console.log(`${rule}\n`);
}
|
||||
|
||||
// ------------------------------------
|
||||
@@ -77,17 +133,29 @@ export async function callSpeechLLM(messages) {
|
||||
{ key: "fallback", type: "llamacpp", url: process.env.LLM_FALLBACK_URL, model: process.env.LLM_FALLBACK_MODEL },
|
||||
];
|
||||
|
||||
const failedBackends = [];
|
||||
|
||||
for (const b of backends) {
|
||||
if (!b.url || !b.model) continue;
|
||||
|
||||
try {
|
||||
console.log(`🧠 Trying backend: ${b.key.toUpperCase()} (${b.url})`);
|
||||
const out = await tryBackend(b, messages);
|
||||
console.log(`✅ Success via ${b.key.toUpperCase()}`);
|
||||
logLLMCall(b, messages, out);
|
||||
return out;
|
||||
} catch (err) {
|
||||
console.warn(`⚠️ ${b.key.toUpperCase()} failed: ${err.message}`);
|
||||
logLLMCall(b, messages, null, err);
|
||||
failedBackends.push({ backend: b.key, error: err.message });
|
||||
}
|
||||
}
|
||||
|
||||
// All backends failed - log summary
|
||||
console.error(`\n${'='.repeat(100)}`);
|
||||
console.error(`🔴 ALL LLM BACKENDS FAILED`);
|
||||
console.error(`${'='.repeat(100)}`);
|
||||
failedBackends.forEach(({ backend, error }) => {
|
||||
console.error(` ${backend.toUpperCase()}: ${error}`);
|
||||
});
|
||||
console.error(`${'='.repeat(100)}\n`);
|
||||
|
||||
throw new Error("all_backends_failed");
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user