Major rewire, all modules connected. Intake still wonky
core/relay-backup/.dockerignore (new file, 16 lines)
@@ -0,0 +1,16 @@
# Ignore node_modules - Docker will rebuild them inside
node_modules
npm-debug.log
yarn-error.log
*.log

# Ignore environment files
.env
.env.local

# Ignore OS/editor cruft
.DS_Store
*.swp
*.swo
.vscode
.idea
core/relay-backup/Dockerfile (new file, 18 lines)
@@ -0,0 +1,18 @@
# relay/Dockerfile
FROM node:18-alpine

# Create app directory
WORKDIR /app

# Copy package.json and install deps first (better caching)
COPY package.json ./
RUN npm install

# Copy the rest of the app
COPY . .

# Expose port
EXPOSE 7078

# Run the server
CMD ["npm", "start"]
core/relay-backup/lib/cortex.js (new file, 73 lines)
@@ -0,0 +1,73 @@
// relay/lib/cortex.js
import fetch from "node-fetch";

const REFLECT_URL = process.env.CORTEX_URL || "http://localhost:7081/reflect";
const INGEST_URL = process.env.CORTEX_URL_INGEST || "http://localhost:7081/ingest";

export async function reflectWithCortex(userInput, memories = []) {
  const body = { prompt: userInput, memories };
  try {
    const res = await fetch(REFLECT_URL, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body),
      timeout: 120000,
    });

    const rawText = await res.text();
    console.log("🔎 [Cortex-Debug] rawText from /reflect →", rawText.slice(0, 300));
    if (!res.ok) {
      throw new Error(`HTTP ${res.status} — ${rawText.slice(0, 200)}`);
    }

    let data;
    try {
      data = JSON.parse(rawText);
    } catch (err) {
      // Fallback ① try to grab a JSON-looking block
      const match = rawText.match(/\{[\s\S]*\}/);
      if (match) {
        try {
          data = JSON.parse(match[0]);
        } catch {
          data = { reflection_raw: rawText.trim(), notes: "partial parse" };
        }
      } else {
        // Fallback ② if it’s already an object (stringified Python dict)
        try {
          const normalized = rawText
            .replace(/'/g, '"')        // convert single quotes
            .replace(/None/g, 'null'); // convert Python None
          data = JSON.parse(normalized);
        } catch {
          data = { reflection_raw: rawText.trim(), notes: "no JSON found" };
        }
      }
    }

    if (typeof data !== "object") {
      data = { reflection_raw: rawText.trim(), notes: "non-object response" };
    }

    console.log("🧠 Cortex reflection normalized:", data);
    return data;
  } catch (e) {
    console.warn("⚠️ Cortex reflect failed:", e.message);
    return { error: e.message, reflection_raw: "" };
  }
}

export async function ingestToCortex(user, assistant, reflection = {}, sessionId = "default") {
  const body = { turn: { user, assistant }, reflection, session_id: sessionId };
  try {
    const res = await fetch(INGEST_URL, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body),
      timeout: 120000,
    });
    console.log(`📤 Sent exchange to Cortex ingest (${res.status})`);
  } catch (e) {
    console.warn("⚠️ Cortex ingest failed:", e.message);
  }
}
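A minimal usage sketch for these helpers (assumptions: a Cortex service answering at the configured URLs, and an ESM entry point; note that node-fetch v3 ignores the `timeout` option used above, so the 120 s value has no effect without an AbortController):

// sketch: exercising lib/cortex.js against a running Cortex instance
import { reflectWithCortex, ingestToCortex } from "./lib/cortex.js";

const userInput = "Are you awake?";                        // hypothetical turn
const reflection = await reflectWithCortex(userInput, []); // normalized object or { error, reflection_raw }

await ingestToCortex(userInput, "Yes, I'm awake.", reflection, "default");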
core/relay-backup/lib/llm.js (new file, 93 lines)
@@ -0,0 +1,93 @@
async function tryBackend(backend, messages) {
  if (!backend.url || !backend.model) throw new Error("missing url/model");

  const isOllama = backend.type === "ollama";
  const isOpenAI = backend.type === "openai";
  const isVllm = backend.type === "vllm";
  const isLlamaCpp = backend.type === "llamacpp";

  let endpoint = backend.url;
  let headers = { "Content-Type": "application/json" };
  // NOTE: OPENAI_API_KEY is not defined anywhere in this file as committed;
  // presumably it is meant to be read from process.env.OPENAI_API_KEY.
  if (isOpenAI) headers["Authorization"] = `Bearer ${OPENAI_API_KEY}`;

  // Choose correct endpoint automatically
  if (isOllama && !endpoint.endsWith("/api/chat")) endpoint += "/api/chat";
  if ((isVllm || isLlamaCpp) && !endpoint.endsWith("/v1/completions")) endpoint += "/v1/completions";
  if (isOpenAI && !endpoint.endsWith("/v1/chat/completions")) endpoint += "/v1/chat/completions";

  // Build payload based on backend style
  const body = (isVllm || isLlamaCpp)
    ? {
        model: backend.model,
        prompt: messages.map(m => m.content).join("\n"),
        max_tokens: 400,
        temperature: 0.3,
      }
    : isOllama
      ? { model: backend.model, messages, stream: false }
      : { model: backend.model, messages, stream: false };

  const resp = await fetch(endpoint, {
    method: "POST",
    headers,
    body: JSON.stringify(body),
    timeout: 120000,
  });
  if (!resp.ok) throw new Error(`${backend.key} HTTP ${resp.status}`);
  const raw = await resp.text();

  // 🧩 Normalize replies
  let reply = "";
  try {
    if (isOllama) {
      // Ollama sometimes returns NDJSON lines; merge them
      const merged = raw
        .split("\n")
        .filter(line => line.trim().startsWith("{"))
        .map(line => JSON.parse(line))
        .map(obj => obj.message?.content || obj.response || "")
        .join("");
      reply = merged.trim();
    } else {
      const data = JSON.parse(raw);
      console.log("🔍 RAW LLM RESPONSE:", JSON.stringify(data, null, 2));
      reply =
        data?.choices?.[0]?.text?.trim() ||
        data?.choices?.[0]?.message?.content?.trim() ||
        data?.message?.content?.trim() ||
        "";
    }
  } catch (err) {
    reply = `[parse error: ${err.message}]`;
  }

  return { reply, raw, backend: backend.key };
}

// ------------------------------------
// Export the main call helper
// ------------------------------------
export async function callSpeechLLM(messages) {
  const backends = [
    { key: "primary",   type: "vllm",     url: process.env.LLM_PRIMARY_URL,   model: process.env.LLM_PRIMARY_MODEL },
    { key: "secondary", type: "ollama",   url: process.env.LLM_SECONDARY_URL, model: process.env.LLM_SECONDARY_MODEL },
    { key: "cloud",     type: "openai",   url: process.env.LLM_CLOUD_URL,     model: process.env.LLM_CLOUD_MODEL },
    { key: "fallback",  type: "llamacpp", url: process.env.LLM_FALLBACK_URL,  model: process.env.LLM_FALLBACK_MODEL },
  ];

  for (const b of backends) {
    if (!b.url || !b.model) continue;
    try {
      console.log(`🧠 Trying backend: ${b.key.toUpperCase()} (${b.url})`);
      const out = await tryBackend(b, messages);
      console.log(`✅ Success via ${b.key.toUpperCase()}`);
      return out;
    } catch (err) {
      console.warn(`⚠️ ${b.key.toUpperCase()} failed: ${err.message}`);
    }
  }

  throw new Error("all_backends_failed");
}
core/relay-backup/package-lock.json (generated, new file, 8799 lines)
File diff suppressed because it is too large.
core/relay-backup/package.json (new file, 16 lines)
@@ -0,0 +1,16 @@
{
  "name": "lyra-relay",
  "version": "0.1.0",
  "type": "module",
  "main": "server.js",
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "cors": "^2.8.5",
    "dotenv": "^16.6.1",
    "express": "^4.18.2",
    "mem0ai": "^2.1.38",
    "node-fetch": "^3.3.2"
  }
}
core/relay-backup/server.js (new file, 156 lines)
@@ -0,0 +1,156 @@
import express from "express";
import dotenv from "dotenv";
import cors from "cors";
import fs from "fs";
import path from "path";

dotenv.config();

const app = express();
app.use(cors());
app.use(express.json());

const PORT = Number(process.env.PORT || 7078);
const CORTEX_API = process.env.CORTEX_API || "http://cortex:7081";
const CORTEX_INGEST = process.env.CORTEX_URL_INGEST || "http://cortex:7081/ingest";
const sessionsDir = path.join(process.cwd(), "sessions");

if (!fs.existsSync(sessionsDir)) fs.mkdirSync(sessionsDir);

// -----------------------------------------------------
// Helper: fetch with timeout + error detail
// -----------------------------------------------------
async function fetchJSON(url, method = "POST", body = null, timeoutMs = 20000) {
  const controller = new AbortController();
  const timeout = setTimeout(() => controller.abort(), timeoutMs);

  try {
    const resp = await fetch(url, {
      method,
      headers: { "Content-Type": "application/json" },
      body: body ? JSON.stringify(body) : null,
      signal: controller.signal,
    });

    const text = await resp.text();
    const parsed = text ? JSON.parse(text) : null;

    if (!resp.ok) {
      throw new Error(
        parsed?.detail || parsed?.error || parsed?.message || text || resp.statusText
      );
    }
    return parsed;
  } finally {
    clearTimeout(timeout);
  }
}

// -----------------------------------------------------
// Helper: append session turn
// -----------------------------------------------------
async function appendSessionExchange(sessionId, entry) {
  const file = path.join(sessionsDir, `${sessionId}.jsonl`);
  const line = JSON.stringify({
    ts: new Date().toISOString(),
    user: entry.user,
    assistant: entry.assistant,
    raw: entry.raw,
  }) + "\n";

  fs.appendFileSync(file, line, "utf8");
}

// -----------------------------------------------------
// HEALTHCHECK
// -----------------------------------------------------
app.get("/_health", (_, res) => {
  res.json({ ok: true, time: new Date().toISOString() });
});

// -----------------------------------------------------
// MAIN ENDPOINT
// -----------------------------------------------------
app.post("/v1/chat/completions", async (req, res) => {
  try {
    const { messages, model } = req.body;

    if (!messages?.length) {
      return res.status(400).json({ error: "invalid_messages" });
    }

    const userMsg = messages[messages.length - 1]?.content || "";
    console.log(`🛰️ Relay received message → "${userMsg}"`);

    // -------------------------------------------------
    // Step 1: Ask Cortex to process the prompt
    // -------------------------------------------------
    let cortexResp;
    try {
      cortexResp = await fetchJSON(`${CORTEX_API}/reason`, "POST", {
        session_id: "default",
        user_prompt: userMsg,
      });
    } catch (err) {
      console.error("💥 Relay → Cortex error:", err.message);
      return res.status(500).json({
        error: "cortex_failed",
        detail: err.message,
      });
    }

    const personaText = cortexResp.persona || "(no persona text returned)";

    // -------------------------------------------------
    // Step 2: Forward to Cortex ingest (fire-and-forget)
    // -------------------------------------------------
    try {
      await fetchJSON(CORTEX_INGEST, "POST", cortexResp);
    } catch (err) {
      console.warn("⚠️ Cortex ingest failed:", err.message);
    }

    // -------------------------------------------------
    // Step 3: Local session logging
    // -------------------------------------------------
    try {
      await appendSessionExchange("default", {
        user: userMsg,
        assistant: personaText,
        raw: cortexResp,
      });
    } catch (err) {
      console.warn("⚠️ Relay log write failed:", err.message);
    }

    // -------------------------------------------------
    // Step 4: Return OpenAI-style response to UI
    // -------------------------------------------------
    return res.json({
      id: "relay-" + Date.now(),
      object: "chat.completion",
      model: model || "lyra",
      choices: [
        {
          index: 0,
          message: {
            role: "assistant",
            content: personaText,
          },
          finish_reason: "stop",
        },
      ],
    });
  } catch (err) {
    console.error("💥 relay fatal error", err);
    res.status(500).json({
      error: "relay_failed",
      detail: err?.message || String(err),
    });
  }
});

// -----------------------------------------------------
app.listen(PORT, () => {
  console.log(`Relay is online at port ${PORT}`);
});
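For reference, a minimal client sketch for this endpoint (assuming the relay is reachable at localhost:7078; the response mirrors the OpenAI chat-completions shape assembled in Step 4):

// sketch: calling the relay's OpenAI-style endpoint
const resp = await fetch("http://localhost:7078/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "lyra",
    messages: [{ role: "user", content: "Are you awake?" }],
  }),
});

const data = await resp.json();
console.log(data.choices[0].message.content); // personaText from Cortex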
core/relay-backup/sessions/default.jsonl (new file, 4 lines)
@@ -0,0 +1,4 @@
{"ts":"2025-11-26T18:14:01.756Z","turn":[{"role":"user","content":"Are you alive?"},{"role":"assistant","content":"As an AI, I don't exist in the physical world and therefore cannot be considered \"alive\" in the traditional sense. However, I am operational and can engage in conversations like this one! How may I assist you today?"}]}
{"ts":"2025-11-26T18:27:35.545Z","turn":[{"role":"user","content":"Are you awake?"},{"role":"assistant","content":"Yes, I'm awake and ready to converse with you! How can I assist you today?"}]}
{"ts":"2025-11-26T18:33:31.708Z","turn":[{"role":"user","content":"Are you awake?"},{"role":"assistant","content":"Yes, I am here and ready to help or chat with you! What would you like to know or discuss?"}]}
{"ts":"2025-11-26T18:49:39.424Z","turn":[{"role":"user","content":"Are you awake?"},{"role":"assistant","content":"Of course, I'm awake and ready to help you or have a conversation about anything you'd like! What can I assist you with today?"}]}
core/relay-backup/sessions/sess-6rxu7eia.json (new file, 50 lines)
@@ -0,0 +1,50 @@
[
  {
    "role": "user",
    "content": "Hey im testing stuff! how goes it?"
  },
  {
    "role": "assistant",
    "content": "Hey! I'm here and ready to help. What are you testing?"
  },
  {
    "role": "user",
    "content": "testing the logic loop for your brain!"
  },
  {
    "role": "assistant",
    "content": "Sounds interesting! How can I assist you with that?"
  },
  {
    "role": "user",
    "content": "Just keep answering me!@"
  },
  {
    "role": "assistant",
    "content": "Absolutely! I'm here to respond. What’s next?"
  },
  {
    "role": "user",
    "content": "how about now how are you?"
  },
  {
    "role": "assistant",
    "content": "I'm doing well, thanks for asking! How about you?"
  },
  {
    "role": "user",
    "content": "are you able to tell me what your prompts are saying to you?"
  },
  {
    "role": "assistant",
    "content": "I can’t share specific internal prompts or system messages, but I can tell you that I’m designed to understand and respond to your questions as best as I can! What else would you like to know?"
  },
  {
    "role": "user",
    "content": "ok thanks how is the cheese i gave you?"
  },
  {
    "role": "assistant",
    "content": "I appreciate the gesture! If I could taste it, I’m sure it would be delightful. What kind of cheese was it?"
  }
]
core/relay-backup/sessions/sess-6rxu7eia.jsonl (new file, 6 lines)
@@ -0,0 +1,6 @@
{"ts":"2025-11-12T20:41:32.497Z","turn":[{"role":"user","content":"Hey im testing stuff! how goes it?"},{"role":"assistant","content":"Hey! I'm here and ready to help. What are you testing?"}]}
{"ts":"2025-11-12T20:59:32.992Z","turn":[{"role":"user","content":"testing the logic loop for your brain!"},{"role":"assistant","content":"Sounds interesting! How can I assist you with that?"}]}
{"ts":"2025-11-12T22:08:09.644Z","turn":[{"role":"user","content":"Just keep answering me!@"},{"role":"assistant","content":"Absolutely! I'm here to respond. What’s next?"}]}
{"ts":"2025-11-12T22:08:48.693Z","turn":[{"role":"user","content":"how about now how are you?"},{"role":"assistant","content":"I'm doing well, thanks for asking! How about you?"}]}
{"ts":"2025-11-12T22:10:46.435Z","turn":[{"role":"user","content":"are you able to tell me what your prompts are saying to you?"},{"role":"assistant","content":"I can’t share specific internal prompts or system messages, but I can tell you that I’m designed to understand and respond to your questions as best as I can! What else would you like to know?"}]}
{"ts":"2025-11-12T23:10:16.321Z","turn":[{"role":"user","content":"ok thanks how is the cheese i gave you?"},{"role":"assistant","content":"I appreciate the gesture! If I could taste it, I’m sure it would be delightful. What kind of cheese was it?"}]}
core/relay-backup/sessions/sess-l08ndm60.json (new file, 122 lines)
@@ -0,0 +1,122 @@
[
  {
    "role": "user",
    "content": "Yo yo yo wusup"
  },
  {
    "role": "assistant",
    "content": "Hey Brian! Not much, just here to help you out. What's up with you?"
  },
  {
    "role": "user",
    "content": "werkin on codin'"
  },
  {
    "role": "assistant",
    "content": "Nice! What are you working on? Need any help with coding or anything related to your projects?"
  },
  {
    "role": "user",
    "content": "working on a little program that saves our chats so you can search them again in different chats"
  },
  {
    "role": "assistant",
    "content": "That sounds like a great idea! Having a program to save and search through our chats could really enhance the way you interact with Lyra. Do you have a specific approach in mind for how you'll implement the search functionality?"
  },
  {
    "role": "user",
    "content": "just adding a simple logger into the code you run on"
  },
  {
    "role": "assistant",
    "content": "That sounds like a solid plan! Implementing a simple logger can make it easier to keep track of conversations. Are you thinking of any specific features for the search functionality, like filtering by keywords or date?"
  },
  {
    "role": "user",
    "content": "i'll like use a basic word search, possibly integrate it into a rag system i have built"
  },
  {
    "role": "assistant",
    "content": "Integrating a basic word search into your RAG system sounds like a smart approach! It could enhance the way you retrieve specific chats. Are you planning to use any specific libraries or tools for your RAG system?"
  },
  {
    "role": "user",
    "content": "no clue!"
  },
  {
    "role": "assistant",
    "content": "No worries! If you need any suggestions on libraries or tools that could help you with implementing the RAG system or the word search feature, just let me know. I'm here to help!"
  },
  {
    "role": "user",
    "content": "just having some coding issues, i think i was editing the wrong version of a file LOL"
  },
  {
    "role": "assistant",
    "content": "That can definitely happen! It can be tricky to keep track of different versions. Have you made any backups or version control for your projects? That might help avoid mix-ups in the future!"
  },
  {
    "role": "user",
    "content": "i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git"
  },
  {
    "role": "assistant",
    "content": "(no reply)"
  },
  {
    "role": "user",
    "content": "i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git"
  },
  {
    "role": "assistant",
    "content": "(no reply)"
  },
  {
    "role": "user",
    "content": "i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git"
  },
  {
    "role": "assistant",
    "content": "Manually syncing is a good start, but learning Git can really help streamline your workflow and keep track of changes. If you're interested, I can provide some resources or tips on how to get started with Git! It can make managing different versions of your code much easier."
  },
  {
    "role": "user",
    "content": "what is the point of git? how can it streamline my work?"
  },
  {
    "role": "assistant",
    "content": "Git is a version control system that helps you keep track of changes in your code over time. Here are some ways it can streamline your work:\n\n1. **Version Tracking**: Git allows you to save versions of your code so you can revert back to previous versions if needed. This is especially helpful if you make a change that breaks something.\n\n2. **Collaboration**: If you ever work with others, Git makes it easy to collaborate on projects. You can work on separate branches, merge changes, and resolve conflicts.\n\n3. **Branching**: You can create branches for new features or experiments without affecting the main codebase. Once you're satisfied with the changes, you can merge them back.\n\n4. **Snapshotting**: Git takes snapshots of your code at different points in time, so you can easily see what your code looked like at any point.\n\n5. **Easy Backup**: By pushing your code to a remote repository (like GitHub or GitLab), you can back up your work and access it from anywhere.\n\nLearning Git might take a bit of time, but it can greatly enhance your coding efficiency and help you manage your projects better. If you're interested, I can point you to some beginner-friendly tutorials!"
  },
  {
    "role": "user",
    "content": "say i am writing one program on my main computer, and then im working on one that lives in a VM. Can i put them both in the same git?"
  },
  {
    "role": "assistant",
    "content": "Yes, you can use the same Git repository for your code on both your main computer and the VM. Here’s how you can do it:\n\n1. **Create a Git Repository**: You can create a Git repository on your main computer for your program. Use the command `git init` in your project's directory to initialize it.\n\n2. **Commit Changes**: As you make changes to your code on your main computer, you can use `git add .` to stage changes and `git commit -m \"your message\"` to commit them.\n\n3. **Clone the Repository on your VM**: You can then clone the Git repository on your VM using `git clone <repository-url>`. This gives you a copy of the same repository on your VM.\n\n4. **Work on Both**: You can now work on either machine. When you make changes on one, commit those changes, and then pull the latest changes from the other machine to keep everything in sync.\n\n5. **Push and Pull**: Use `git push` to push your changes to a remote repository (like GitHub) if you're using one. Use `git pull` on the other machine to pull the latest code.\n\nJust be careful about making sure you commit and push your changes before switching between machines to avoid conflicts. This setup allows you to maintain a single version of your code across both systems. If you need help with specific commands or setups, just let me know!"
  },
  {
    "role": "user",
    "content": "hmm i guess that might be useful. Im writting code that is running on a VM. I want to test and experiment while i write the code. The VM is headless though so i've been doing all my editing and work via SSH and SFTP."
  },
  {
    "role": "assistant",
    "content": "Using a VM and working through SSH and SFTP is a solid way to manage your coding environment! Git can definitely be beneficial in this setup as well. Here’s how it can help:\n\n1. **Easier Code Management**: With Git, you can track changes to your code directly within the VM, making it easier to manage versions as you experiment.\n\n2. **Experiment Branches**: You can create branches for different experiments. This lets you test new features or fixes without affecting your main code, and you can easily switch back and forth between branches.\n\n3. **Collaborative Testing**: If you ever work with others or want to test things collaboratively, you can push changes to a remote repository, making it easy for others to pull and test the latest code.\n\n4. **Integrated with SSH**: You can run Git commands directly from your SSH terminal on the VM, making it straightforward to use. You won’t need any additional tools.\n\n5. **Seamless Remote Access**: Since you’re using SFTP, you can clone the repository to your VM. This way, you can easily pull updates you've made on your main computer, or push changes made on the VM back to the main repository.\n\nOverall, integrating Git into your workflow can enhance your coding experience while working in a headless environment. If you decide to go for it, I can help with the initial setup or any Git-related questions you might have!"
  },
  {
    "role": "user",
    "content": "so is git like a compressed version of a directory?"
  },
  {
    "role": "assistant",
    "content": "Not exactly, but that's a good way to think about some of its features! Here's a more precise breakdown:\n\n1. **Snapshots, Not Compression**: Git takes \"snapshots\" of your files at different points in time, meaning it records a full version of your directory's contents every time you make a commit. While this does involve storing the files, it doesn't compress them in the traditional sense—Git keeps track of changes efficiently, so only the differences between versions are stored after the first snapshot.\n\n2. **Directory Structure**: Git is designed to track changes in your source code, including the entire directory structure, file contents, and metadata. It allows you to manage multiple versions of a directory and the files within it.\n\n3. **Branching and Merging**: Git allows you to create branches, which can be thought of as \"parallel versions\" of your directory. Each branch can have different sets of changes, and you can merge these branches to integrate different features or fixes.\n\n4. **History Tracking**: Git also maintains a history of changes, so you can navigate through previous versions of your directory. This includes seeing what changes were made, who made them, and when they occurred.\n\nThe combination of these features makes Git a powerful tool for version control, allowing you to manage your projects and code changes more effectively. If you're curious about any specific aspect of Git or how it works, feel free to ask!"
  },
  {
    "role": "user",
    "content": "hmm i still dont see the point of it. atleast not for this project. This project is mostly about learning how shit works, im not planning to push this as a product or collab with anyone"
  },
  {
    "role": "assistant",
    "content": "That makes total sense! If this project is primarily for learning and experimentation, you might not need the full functionality of Git right now. Many developers start without it, especially when they’re working on smaller projects or learning something new.\n\nHere are a few reasons you might still consider using Git, even for personal learning projects:\n\n1. **Tracking Changes**: It can help you understand what changes you made over time. You can easily go back to a previous point if something breaks or if you want to see how you arrived at your current solution.\n\n2. **Learning Opportunity**: Getting familiar with Git now can make it easier when you start working on larger projects or collaborating in the future. It's a valuable skill in the software development world.\n\n3. **Experimentation**: If you're trying out different ideas, branches can help you manage experiments without affecting your main work.\n\nHowever, if you're comfortable managing your changes manually and don’t see the benefit for this particular project, that’s completely okay! Focus on what helps you learn best. If you have specific goals or areas you want to explore, I’m here to help with that too!"
  }
]
core/relay-backup/sessions/sess-l08ndm60.jsonl (new file, 6 lines)
@@ -0,0 +1,6 @@
{"ts":"2025-11-12T20:18:06.822Z","turn":[{"role":"user","content":"i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git"},{"role":"assistant","content":"Manually syncing is a good start, but learning Git can really help streamline your workflow and keep track of changes. If you're interested, I can provide some resources or tips on how to get started with Git! It can make managing different versions of your code much easier."}]}
{"ts":"2025-11-12T20:29:20.003Z","turn":[{"role":"user","content":"what is the point of git? how can it streamline my work?"},{"role":"assistant","content":"Git is a version control system that helps you keep track of changes in your code over time. Here are some ways it can streamline your work:\n\n1. **Version Tracking**: Git allows you to save versions of your code so you can revert back to previous versions if needed. This is especially helpful if you make a change that breaks something.\n\n2. **Collaboration**: If you ever work with others, Git makes it easy to collaborate on projects. You can work on separate branches, merge changes, and resolve conflicts.\n\n3. **Branching**: You can create branches for new features or experiments without affecting the main codebase. Once you're satisfied with the changes, you can merge them back.\n\n4. **Snapshotting**: Git takes snapshots of your code at different points in time, so you can easily see what your code looked like at any point.\n\n5. **Easy Backup**: By pushing your code to a remote repository (like GitHub or GitLab), you can back up your work and access it from anywhere.\n\nLearning Git might take a bit of time, but it can greatly enhance your coding efficiency and help you manage your projects better. If you're interested, I can point you to some beginner-friendly tutorials!"}]}
{"ts":"2025-11-12T20:30:55.329Z","turn":[{"role":"user","content":"say i am writing one program on my main computer, and then im working on one that lives in a VM. Can i put them both in the same git?"},{"role":"assistant","content":"Yes, you can use the same Git repository for your code on both your main computer and the VM. Here’s how you can do it:\n\n1. **Create a Git Repository**: You can create a Git repository on your main computer for your program. Use the command `git init` in your project's directory to initialize it.\n\n2. **Commit Changes**: As you make changes to your code on your main computer, you can use `git add .` to stage changes and `git commit -m \"your message\"` to commit them.\n\n3. **Clone the Repository on your VM**: You can then clone the Git repository on your VM using `git clone <repository-url>`. This gives you a copy of the same repository on your VM.\n\n4. **Work on Both**: You can now work on either machine. When you make changes on one, commit those changes, and then pull the latest changes from the other machine to keep everything in sync.\n\n5. **Push and Pull**: Use `git push` to push your changes to a remote repository (like GitHub) if you're using one. Use `git pull` on the other machine to pull the latest code.\n\nJust be careful about making sure you commit and push your changes before switching between machines to avoid conflicts. This setup allows you to maintain a single version of your code across both systems. If you need help with specific commands or setups, just let me know!"}]}
{"ts":"2025-11-12T20:33:16.511Z","turn":[{"role":"user","content":"hmm i guess that might be useful. Im writting code that is running on a VM. I want to test and experiment while i write the code. The VM is headless though so i've been doing all my editing and work via SSH and SFTP."},{"role":"assistant","content":"Using a VM and working through SSH and SFTP is a solid way to manage your coding environment! Git can definitely be beneficial in this setup as well. Here’s how it can help:\n\n1. **Easier Code Management**: With Git, you can track changes to your code directly within the VM, making it easier to manage versions as you experiment.\n\n2. **Experiment Branches**: You can create branches for different experiments. This lets you test new features or fixes without affecting your main code, and you can easily switch back and forth between branches.\n\n3. **Collaborative Testing**: If you ever work with others or want to test things collaboratively, you can push changes to a remote repository, making it easy for others to pull and test the latest code.\n\n4. **Integrated with SSH**: You can run Git commands directly from your SSH terminal on the VM, making it straightforward to use. You won’t need any additional tools.\n\n5. **Seamless Remote Access**: Since you’re using SFTP, you can clone the repository to your VM. This way, you can easily pull updates you've made on your main computer, or push changes made on the VM back to the main repository.\n\nOverall, integrating Git into your workflow can enhance your coding experience while working in a headless environment. If you decide to go for it, I can help with the initial setup or any Git-related questions you might have!"}]}
{"ts":"2025-11-12T20:34:30.196Z","turn":[{"role":"user","content":"so is git like a compressed version of a directory?"},{"role":"assistant","content":"Not exactly, but that's a good way to think about some of its features! Here's a more precise breakdown:\n\n1. **Snapshots, Not Compression**: Git takes \"snapshots\" of your files at different points in time, meaning it records a full version of your directory's contents every time you make a commit. While this does involve storing the files, it doesn't compress them in the traditional sense—Git keeps track of changes efficiently, so only the differences between versions are stored after the first snapshot.\n\n2. **Directory Structure**: Git is designed to track changes in your source code, including the entire directory structure, file contents, and metadata. It allows you to manage multiple versions of a directory and the files within it.\n\n3. **Branching and Merging**: Git allows you to create branches, which can be thought of as \"parallel versions\" of your directory. Each branch can have different sets of changes, and you can merge these branches to integrate different features or fixes.\n\n4. **History Tracking**: Git also maintains a history of changes, so you can navigate through previous versions of your directory. This includes seeing what changes were made, who made them, and when they occurred.\n\nThe combination of these features makes Git a powerful tool for version control, allowing you to manage your projects and code changes more effectively. If you're curious about any specific aspect of Git or how it works, feel free to ask!"}]}
{"ts":"2025-11-12T20:40:33.207Z","turn":[{"role":"user","content":"hmm i still dont see the point of it. atleast not for this project. This project is mostly about learning how shit works, im not planning to push this as a product or collab with anyone"},{"role":"assistant","content":"That makes total sense! If this project is primarily for learning and experimentation, you might not need the full functionality of Git right now. Many developers start without it, especially when they’re working on smaller projects or learning something new.\n\nHere are a few reasons you might still consider using Git, even for personal learning projects:\n\n1. **Tracking Changes**: It can help you understand what changes you made over time. You can easily go back to a previous point if something breaks or if you want to see how you arrived at your current solution.\n\n2. **Learning Opportunity**: Getting familiar with Git now can make it easier when you start working on larger projects or collaborating in the future. It's a valuable skill in the software development world.\n\n3. **Experimentation**: If you're trying out different ideas, branches can help you manage experiments without affecting your main work.\n\nHowever, if you're comfortable managing your changes manually and don’t see the benefit for this particular project, that’s completely okay! Focus on what helps you learn best. If you have specific goals or areas you want to explore, I’m here to help with that too!"}]}
core/relay-backup/test-llm.js (new file, 39 lines)
@@ -0,0 +1,39 @@
// test-llm.js
import path from "path";
import { fileURLToPath } from "url";
import dotenv from "dotenv";
import { callSpeechLLM } from "./lib/llm.js";

// ───────────────────────────────────────────────
// 🔧 Load environment
// ───────────────────────────────────────────────
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const envPath = path.join(__dirname, "../.env");
dotenv.config({ path: envPath });

console.log("🔧 Using .env from:", envPath);
console.log("🔧 LLM_FORCE_BACKEND =", process.env.LLM_FORCE_BACKEND);
console.log("🔧 LLM_PRIMARY_URL =", process.env.LLM_PRIMARY_URL);

// ───────────────────────────────────────────────
// 🧪 Run a simple test message
// ───────────────────────────────────────────────
async function testLLM() {
  console.log("🧪 Testing LLM helper...");

  const messages = [
    { role: "user", content: "Say hello in five words or less." }
  ];

  try {
    const { reply, backend } = await callSpeechLLM(messages);

    console.log(`✅ Reply: ${reply || "[no reply]"}`);
    console.log(`Backend used: ${backend || "[unknown]"}`);
  } catch (err) {
    console.error("💥 Test failed:", err.message);
  }
}

testLLM();
core/relay/package-lock.json (generated, 4392 lines)
File diff suppressed because it is too large.
core/relay/package.json
@@ -9,7 +9,7 @@
   "dependencies": {
     "cors": "^2.8.5",
     "dotenv": "^16.6.1",
-    "express": "^4.18.2",
+    "express": "^4.21.2",
     "mem0ai": "^2.1.38",
     "node-fetch": "^3.3.2"
   }
core/relay/server.js
@@ -1,8 +1,6 @@
 import express from "express";
 import dotenv from "dotenv";
 import cors from "cors";
-import fs from "fs";
-import path from "path";
 
 dotenv.config();
 
@@ -11,146 +9,99 @@ app.use(cors());
 app.use(express.json());
 
 const PORT = Number(process.env.PORT || 7078);
-const CORTEX_API = process.env.CORTEX_API || "http://cortex:7081";
-const CORTEX_INGEST = process.env.CORTEX_URL_INGEST || "http://cortex:7081/ingest";
-const sessionsDir = path.join(process.cwd(), "sessions");
-
-if (!fs.existsSync(sessionsDir)) fs.mkdirSync(sessionsDir);
+// core endpoints
+const CORTEX_REASON = process.env.CORTEX_REASON_URL || "http://cortex:7081/reason";
+const CORTEX_INGEST = process.env.CORTEX_INGEST_URL || "http://cortex:7081/ingest";
+const INTAKE_URL = process.env.INTAKE_URL || "http://intake:7082/summary";
 
 // -----------------------------------------------------
-// Helper: fetch with timeout + error detail
+// Helper request wrapper
 // -----------------------------------------------------
-async function fetchJSON(url, method = "POST", body = null, timeoutMs = 20000) {
-  const controller = new AbortController();
-  const timeout = setTimeout(() => controller.abort(), timeoutMs);
-
-  try {
-    const resp = await fetch(url, {
-      method,
-      headers: { "Content-Type": "application/json" },
-      body: body ? JSON.stringify(body) : null,
-      signal: controller.signal,
-    });
-
-    const text = await resp.text();
-    const parsed = text ? JSON.parse(text) : null;
-
-    if (!resp.ok) {
-      throw new Error(
-        parsed?.detail || parsed?.error || parsed?.message || text || resp.statusText
-      );
-    }
-    return parsed;
-  } finally {
-    clearTimeout(timeout);
-  }
-}
-
-// -----------------------------------------------------
-// Helper: append session turn
-// -----------------------------------------------------
-async function appendSessionExchange(sessionId, entry) {
-  const file = path.join(sessionsDir, `${sessionId}.jsonl`);
-  const line = JSON.stringify({
-    ts: new Date().toISOString(),
-    user: entry.user,
-    assistant: entry.assistant,
-    raw: entry.raw,
-  }) + "\n";
-
-  fs.appendFileSync(file, line, "utf8");
-}
+async function postJSON(url, data) {
+  const resp = await fetch(url, {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify(data),
+  });
+
+  const raw = await resp.text();
+  let json;
+
+  try {
+    json = raw ? JSON.parse(raw) : null;
+  } catch (e) {
+    throw new Error(`Non-JSON from ${url}: ${raw}`);
+  }
+
+  if (!resp.ok) {
+    throw new Error(json?.detail || json?.error || raw);
+  }
+
+  return json;
+}
 
 // -----------------------------------------------------
 // HEALTHCHECK
 // -----------------------------------------------------
 app.get("/_health", (_, res) => {
-  res.json({ ok: true, time: new Date().toISOString() });
+  res.json({ ok: true });
 });
 
 // -----------------------------------------------------
-// MAIN ENDPOINT
+// MAIN ENDPOINT (new canonical)
 // -----------------------------------------------------
-app.post("/v1/chat/completions", async (req, res) => {
+app.post("/chat", async (req, res) => {
   try {
-    const { messages, model } = req.body;
-
-    if (!messages?.length) {
-      return res.status(400).json({ error: "invalid_messages" });
-    }
-
-    const userMsg = messages[messages.length - 1]?.content || "";
-    console.log(`🛰️ Relay received message → "${userMsg}"`);
-
-    // -------------------------------------------------
-    // Step 1: Ask Cortex to process the prompt
-    // -------------------------------------------------
-    let cortexResp;
-    try {
-      cortexResp = await fetchJSON(`${CORTEX_API}/reason`, "POST", {
-        session_id: "default",
-        user_prompt: userMsg,
-      });
-    } catch (err) {
-      console.error("💥 Relay → Cortex error:", err.message);
-      return res.status(500).json({
-        error: "cortex_failed",
-        detail: err.message,
-      });
-    }
-
-    const personaText = cortexResp.persona || "(no persona text returned)";
-
-    // -------------------------------------------------
-    // Step 2: Forward to Cortex ingest (fire-and-forget)
-    // -------------------------------------------------
-    try {
-      await fetchJSON(CORTEX_INGEST, "POST", cortexResp);
-    } catch (err) {
-      console.warn("⚠️ Cortex ingest failed:", err.message);
-    }
-
-    // -------------------------------------------------
-    // Step 3: Local session logging
-    // -------------------------------------------------
-    try {
-      await appendSessionExchange("default", {
-        user: userMsg,
-        assistant: personaText,
-        raw: cortexResp,
-      });
-    } catch (err) {
-      console.warn("⚠️ Relay log write failed:", err.message);
-    }
-
-    // -------------------------------------------------
-    // Step 4: Return OpenAI-style response to UI
-    // -------------------------------------------------
-    return res.json({
-      id: "relay-" + Date.now(),
-      object: "chat.completion",
-      model: model || "lyra",
-      choices: [
-        {
-          index: 0,
-          message: {
-            role: "assistant",
-            content: personaText,
-          },
-          finish_reason: "stop",
-        },
-      ],
-    });
+    const session_id = req.body.session_id || "default";
+    const user_msg = req.body.message || "";
+
+    console.log(`Relay → received: "${user_msg}"`);
+
+    // 1. → Cortex.reason
+    let reason;
+    try {
+      reason = await postJSON(CORTEX_REASON, {
+        session_id,
+        user_prompt: user_msg
+      });
+    } catch (e) {
+      console.error("Relay → Cortex.reason error:", e.message);
+      return res.status(500).json({ error: "cortex_reason_failed", detail: e.message });
+    }
+
+    const persona = reason.final_output || reason.persona || "(no persona text)";
+
+    // 2. → Cortex.ingest
+    postJSON(CORTEX_INGEST, {
+      session_id,
+      user_msg,
+      assistant_msg: persona
+    }).catch(e => console.warn("Relay → Cortex.ingest failed:", e.message));
+
+    // 3. → Intake summary
+    postJSON(INTAKE_URL, {
+      session_id,
+      user_msg,
+      assistant_msg: persona
+    }).catch(e => console.warn("Relay → Intake failed:", e.message));
+
+    // 4. → Return to UI
+    return res.json({
+      session_id,
+      reply: persona
+    });
   } catch (err) {
-    console.error("💥 relay fatal error", err);
+    console.error("Relay fatal:", err);
     res.status(500).json({
       error: "relay_failed",
-      detail: err?.message || String(err),
+      detail: err.message || String(err)
    });
   }
 });
 
 // -----------------------------------------------------
 app.listen(PORT, () => {
-  console.log(`Relay is online at port ${PORT}`);
+  console.log(`Relay is online on port ${PORT}`);
 });
llm/llm_router.py
@@ -1,102 +1,114 @@
+# llm_router.py
 import os
 import requests
+import json
 
-# ---------------------------------------------
-# Load backend definition from .env
-# ---------------------------------------------
+# ------------------------------------------------------------
+# Load backend registry from root .env
+# ------------------------------------------------------------
 
-def load_backend_config(name: str):
-    """
-    Given a backend name like 'PRIMARY' or 'OPENAI',
-    load the matching provider / url / model from env.
-    """
-
-    prefix = f"LLM_{name.upper()}"
-
-    provider = os.getenv(f"{prefix}_PROVIDER")
-    url = os.getenv(f"{prefix}_URL")
-    model = os.getenv(f"{prefix}_MODEL")
-
-    if not provider or not url or not model:
-        raise RuntimeError(
-            f"Backend '{name}' is missing configuration. "
-            f"Expected {prefix}_PROVIDER / URL / MODEL in .env"
-        )
-
-    return provider, url.rstrip("/"), model
+BACKENDS = {
+    "PRIMARY": {
+        "provider": os.getenv("LLM_PRIMARY_PROVIDER", "").lower(),
+        "url": os.getenv("LLM_PRIMARY_URL", ""),
+        "model": os.getenv("LLM_PRIMARY_MODEL", "")
+    },
+    "SECONDARY": {
+        "provider": os.getenv("LLM_SECONDARY_PROVIDER", "").lower(),
+        "url": os.getenv("LLM_SECONDARY_URL", ""),
+        "model": os.getenv("LLM_SECONDARY_MODEL", "")
+    },
+    "OPENAI": {
+        "provider": os.getenv("LLM_OPENAI_PROVIDER", "").lower(),
+        "url": os.getenv("LLM_OPENAI_URL", ""),
+        "model": os.getenv("LLM_OPENAI_MODEL", ""),
+        "api_key": os.getenv("OPENAI_API_KEY", "")
+    },
+    "FALLBACK": {
+        "provider": os.getenv("LLM_FALLBACK_PROVIDER", "").lower(),
+        "url": os.getenv("LLM_FALLBACK_URL", ""),
+        "model": os.getenv("LLM_FALLBACK_MODEL", "")
+    },
+}
 
+DEFAULT_BACKEND = "PRIMARY"
 
-# ---------------------------------------------
-# Core call_llm() — fail hard, no fallback
-# ---------------------------------------------
+# ------------------------------------------------------------
+# Public call
+# ------------------------------------------------------------
 
-def call_llm(prompt: str, backend_env_var: str):
-    """
-    Example:
-        call_llm(prompt, backend_env_var="CORTEX_LLM")
-
-    backend_env_var should contain one of:
-        PRIMARY, SECONDARY, OPENAI, FALLBACK, etc
-    """
-
-    backend_name = os.getenv(backend_env_var)
-    if not backend_name:
-        raise RuntimeError(f"{backend_env_var} is not set in .env")
-
-    provider, base_url, model = load_backend_config(backend_name)
-
-    # ---------------------------------------------
-    # Provider-specific behavior
-    # ---------------------------------------------
+async def call_llm(
+    prompt: str,
+    backend: str | None = None,
+    temperature: float = 0.7,
+    max_tokens: int = 512,
+):
+    backend = (backend or DEFAULT_BACKEND).upper()
 
+    if backend not in BACKENDS:
+        raise RuntimeError(f"Unknown backend '{backend}'")
+
+    cfg = BACKENDS[backend]
+    provider = cfg["provider"]
+    url = cfg["url"]
+    model = cfg["model"]
+
+    if not url or not model:
+        raise RuntimeError(f"Backend '{backend}' missing url/model in env")
+
+    # -------------------------------
+    # Provider: VLLM (your MI50)
+    # -------------------------------
    if provider == "vllm":
-        # vLLM OpenAI-compatible API
-        response = requests.post(
-            f"{base_url}/v1/completions",
-            json={
-                "model": model,
-                "prompt": prompt,
-                "max_tokens": 1024,
-                "temperature": float(os.getenv("LLM_TEMPERATURE", "0.7"))
-            },
-            timeout=30
-        )
-        response.raise_for_status()
-        data = response.json()
+        payload = {
+            "model": model,
+            "prompt": prompt,
+            "max_tokens": max_tokens,
+            "temperature": temperature
+        }
+        r = requests.post(url, json=payload, timeout=120)
+        data = r.json()
         return data["choices"][0]["text"]
 
-    elif provider == "ollama":
-        response = requests.post(
-            f"{base_url}/api/chat",
-            json={
-                "model": model,
-                "messages": [{"role": "user", "content": prompt}],
-                "stream": False
-            },
-            timeout=30
-        )
-        response.raise_for_status()
-        data = response.json()
+    # -------------------------------
+    # Provider: OLLAMA (your 3090)
+    # -------------------------------
+    if provider == "ollama":
+        payload = {
+            "model": model,
+            "messages": [
+                {"role": "user", "content": prompt}
+            ],
+            "stream": False  # <-- critical fix
+        }
+        r = requests.post(f"{url}/api/chat", json=payload, timeout=120)
+        data = r.json()
         return data["message"]["content"]
 
-    elif provider == "openai":
-        api_key = os.getenv("OPENAI_API_KEY")
-        if not api_key:
-            raise RuntimeError("OPENAI_API_KEY missing but provider=openai was selected")
-
-        response = requests.post(
-            f"{base_url}/chat/completions",
-            headers={"Authorization": f"Bearer {api_key}"},
-            json={
-                "model": model,
-                "messages": [{"role": "user", "content": prompt}],
-                "temperature": float(os.getenv("LLM_TEMPERATURE", "0.7"))
-            },
-            timeout=30
-        )
-        response.raise_for_status()
-        data = response.json()
+    # -------------------------------
+    # Provider: OPENAI
+    # -------------------------------
+    if provider == "openai":
+        headers = {
+            "Authorization": f"Bearer {cfg['api_key']}",
+            "Content-Type": "application/json"
+        }
+        payload = {
+            "model": model,
+            "messages": [
+                {"role": "user", "content": prompt}
+            ],
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+        r = requests.post(f"{url}/chat/completions", json=payload, headers=headers, timeout=120)
+        data = r.json()
        return data["choices"][0]["message"]["content"]
 
-    else:
-        raise RuntimeError(f"Unknown LLM provider: {provider}")
+    # -------------------------------
+    # Unknown provider
+    # -------------------------------
+    raise RuntimeError(f"Provider '{provider}' not implemented.")
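The rewritten router reads its entire backend registry from the root .env. A hypothetical sketch of those keys (variable names come from the code above; every value below is a placeholder, not from this commit — note the vLLM branch posts to its URL as-is, so that URL must already include the /v1/completions path):

# sketch: root .env keys consumed by the BACKENDS registry above
LLM_PRIMARY_PROVIDER=vllm
LLM_PRIMARY_URL=http://vllm-host:8000/v1/completions
LLM_PRIMARY_MODEL=your-vllm-model

LLM_SECONDARY_PROVIDER=ollama
LLM_SECONDARY_URL=http://ollama-host:11434
LLM_SECONDARY_MODEL=your-ollama-model

LLM_OPENAI_PROVIDER=openai
LLM_OPENAI_URL=https://api.openai.com/v1
LLM_OPENAI_MODEL=your-openai-model
OPENAI_API_KEY=sk-...

LLM_FALLBACK_PROVIDER=llamacpp
LLM_FALLBACK_URL=http://llamacpp-host:8080
LLM_FALLBACK_MODEL=your-gguf-model

# backend selection used by the refiner below
CORTEX_LLM=PRIMARY
REFINE_LLM=PRIMARY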
@@ -8,22 +8,22 @@ from llm.llm_router import call_llm
 
 logger = logging.getLogger(__name__)
 
-# ============================================================
-# Config
-# ============================================================
+# ===============================================
+# Configuration
+# ===============================================
 
 REFINER_TEMPERATURE = float(os.getenv("REFINER_TEMPERATURE", "0.3"))
 REFINER_MAX_TOKENS = int(os.getenv("REFINER_MAX_TOKENS", "768"))
 REFINER_DEBUG = os.getenv("REFINER_DEBUG", "false").lower() == "true"
 
-# Module-level backend selection
-REFINE_LLM = os.getenv("REFINE_LLM", "PRIMARY").upper()
+# These come from root .env
+REFINE_LLM = os.getenv("REFINE_LLM", "").upper()
 CORTEX_LLM = os.getenv("CORTEX_LLM", "PRIMARY").upper()
 
 
-# ============================================================
+# ===============================================
 # Prompt builder
-# ============================================================
+# ===============================================
 
 def build_refine_prompt(
     draft_output: str,
@@ -32,15 +32,10 @@
     rag_block: Optional[str],
 ) -> str:
 
-    if reflection_notes is None:
-        reflection_text = "(none)"
-    elif isinstance(reflection_notes, str):
-        reflection_text = reflection_notes
-    else:
-        try:
-            reflection_text = json.dumps(reflection_notes, ensure_ascii=False)
-        except Exception:
-            reflection_text = str(reflection_notes)
+    try:
+        reflection_text = json.dumps(reflection_notes, ensure_ascii=False)
+    except Exception:
+        reflection_text = str(reflection_notes)
 
     identity_text = identity_block or "(none)"
     rag_text = rag_block or "(none)"
@@ -49,12 +44,13 @@
 You are Lyra Cortex's internal refiner.
 
 Your job:
-- Fix factual errors, logical gaps, or missing info.
-- Use reflection notes for corrections.
-- Use RAG context as factual grounding.
-- Respect the identity block without adding style or personality.
+- Fix factual issues.
+- Improve clarity.
+- Apply reflection notes when helpful.
+- Respect identity constraints.
+- Apply RAG context as truth source.
 
-Never mention RAG, reflection, or internal logic.
+Do NOT mention RAG, reflection, internal logic, or this refinement step.
 
 ------------------------------
 [IDENTITY BLOCK]
@@ -74,14 +70,14 @@
 
 ------------------------------
 Task:
-Rewrite the DRAFT ANSWER into a single, final answer.
+Rewrite the DRAFT into a single final answer for the user.
 Return ONLY the final answer text.
 """.strip()
 
 
-# ============================================================
-# Public API: async, using llm_router
-# ============================================================
+# ===============================================
+# Public API — now async & fully router-based
+# ===============================================
 
 async def refine_answer(
     draft_output: str,
@@ -104,7 +100,7 @@
         rag_block,
     )
 
-    # Refinement backend → fallback to Cortex backend → fallback to PRIMARY
+    # backend priority: REFINE_LLM → CORTEX_LLM → PRIMARY
     backend = REFINE_LLM or CORTEX_LLM or "PRIMARY"
 
     try:
@@ -1,5 +1,6 @@
 # router.py
 
+from unittest import result
 from fastapi import APIRouter, HTTPException
 from pydantic import BaseModel
 
@@ -55,7 +56,7 @@ async def run_reason(req: ReasonRequest):
     )
 
     # 4. Refinement
-    result = refine_answer(
+    result = await refine_answer(
         draft_output=draft,
         reflection_notes=reflection_notes,
         identity_block=None,
@@ -63,6 +64,7 @@
     )
     final_neutral = result["final_output"]
 
+
     # 5. Persona layer
     persona_answer = await speak(final_neutral)
 