Mode selection and settings added to UI

This commit is contained in:
serversdwn
2025-12-21 14:30:32 -05:00
parent ceb60119fb
commit 01d4811717
6 changed files with 359 additions and 37 deletions

View File

@@ -4,8 +4,8 @@
"focus": "conversation",
"confidence": 0.7,
"curiosity": 1.0,
"last_updated": "2025-12-20T09:08:41.342756",
"interaction_count": 25,
"last_updated": "2025-12-21T18:50:41.582043",
"interaction_count": 26,
"learning_queue": [],
"active_goals": [],
"preferences": {

View File

@@ -141,11 +141,16 @@ async def call_llm(
"Authorization": f"Bearer {cfg['api_key']}",
"Content-Type": "application/json"
}
# Use messages array if provided, otherwise convert prompt to single user message
if messages:
chat_messages = messages
else:
chat_messages = [{"role": "user", "content": prompt}]
payload = {
"model": model,
"messages": [
{"role": "user", "content": prompt}
],
"messages": chat_messages,
"temperature": temperature,
"max_tokens": max_tokens,
}

View File

@@ -44,6 +44,7 @@ class ReasonRequest(BaseModel):
session_id: str
user_prompt: str
temperature: float | None = None
backend: str | None = None
# -------------------------------------------------------------------
@@ -388,8 +389,11 @@ async def run_simple(req: ReasonRequest):
logger.info(f"📨 Total messages being sent to LLM: {len(messages)} (including system message)")
# Get backend from env (default to OPENAI for standard mode)
backend = os.getenv("STANDARD_MODE_LLM", "OPENAI")
# Get backend from request, otherwise fall back to env variable
backend = req.backend if req.backend else os.getenv("STANDARD_MODE_LLM", "SECONDARY")
backend = backend.upper() # Normalize to uppercase
logger.info(f"🔧 Using backend: {backend}")
temperature = req.temperature if req.temperature is not None else 0.7
# Direct LLM call with messages (works for Ollama/OpenAI chat APIs)