simple context added to standard mode
@@ -351,17 +351,34 @@ async def run_simple(req: ReasonRequest):
     logger.info(f"📝 User: {req.user_prompt[:150]}...")
     logger.info(f"{'-'*100}\n")
 
-    # Get conversation history from context
+    # Get conversation history from context and intake buffer
     context_state = await collect_context(req.session_id, req.user_prompt)
 
-    # Build simple conversation history
-    messages = []
-    if context_state.get("recent_messages"):
-        for msg in context_state["recent_messages"]:
+    # Get recent messages from Intake buffer
+    from intake.intake import get_recent_messages
+    recent_msgs = get_recent_messages(req.session_id, limit=20)
+    logger.info(f"📋 Retrieved {len(recent_msgs)} recent messages from Intake buffer")
+
+    # Build simple conversation history with system message
+    system_message = {
+        "role": "system",
+        "content": (
+            "You are a helpful AI assistant. Provide direct, concise responses to the user's questions. "
+            "Maintain context from previous messages in the conversation."
+        )
+    }
+
+    messages = [system_message]
+
+    # Add conversation history
+
+    if recent_msgs:
+        for msg in recent_msgs:
             messages.append({
                 "role": msg.get("role", "user"),
                 "content": msg.get("content", "")
             })
+            logger.info(f" - {msg.get('role')}: {msg.get('content', '')[:50]}...")
 
     # Add current user message
     messages.append({
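
Note: the added code imports get_recent_messages from an intake.intake module that is not part of this diff. A minimal sketch of what such a buffer could look like, assuming an in-memory per-session deque (the add_message writer, the 100-turn cap, and the storage choice are illustrative, not the project's actual implementation):

# intake/intake.py -- hypothetical sketch of the buffer this hunk imports from
from collections import defaultdict, deque

# One ring buffer per session; the 100-turn cap is an assumed limit.
_buffers = defaultdict(lambda: deque(maxlen=100))

def add_message(session_id: str, role: str, content: str) -> None:
    """Record one conversation turn (assumed writer-side helper)."""
    _buffers[session_id].append({"role": role, "content": content})

def get_recent_messages(session_id: str, limit: int = 20) -> list:
    """Return up to `limit` most recent turns, oldest first, each shaped
    {'role': ..., 'content': ...} -- the keys run_simple() reads via msg.get()."""
    return list(_buffers[session_id])[-limit:]

With a buffer like this, the list built above comes out as [system message, *recent turns, current user prompt], which is the standard chat-completions layout.
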
@@ -369,30 +386,24 @@ async def run_simple(req: ReasonRequest):
         "content": req.user_prompt
     })
 
-    # Format messages into a simple prompt for the LLM
-    conversation = ""
-    for msg in messages:
-        role = msg["role"]
-        content = msg["content"]
-        if role == "user":
-            conversation += f"User: {content}\n\n"
-        elif role == "assistant":
-            conversation += f"Assistant: {content}\n\n"
-
-    conversation += "Assistant: "
+    logger.info(f"📨 Total messages being sent to LLM: {len(messages)} (including system message)")
 
     # Get backend from env (default to OPENAI for standard mode)
    backend = os.getenv("STANDARD_MODE_LLM", "OPENAI")
     temperature = req.temperature if req.temperature is not None else 0.7
 
-    # Direct LLM call
+    # Direct LLM call with messages (works for Ollama/OpenAI chat APIs)
     try:
-        response = await call_llm(
-            prompt=conversation,
+        raw_response = await call_llm(
+            messages=messages,
             backend=backend,
             temperature=temperature,
             max_tokens=2048
         )
+
+        # Clean response - just strip whitespace
+        response = raw_response.strip()
+
     except Exception as e:
         logger.error(f"❌ LLM call failed: {e}")
         response = f"Error: {str(e)}"
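
Note: call_llm itself is outside this diff; the call site only shows that it now accepts a structured messages list instead of a flat prompt string. A hedged sketch of a wrapper consistent with that call site, assuming both backends are reached through an OpenAI-compatible chat-completions client (client setup, env var names, and default model names are assumptions):

import os
from openai import AsyncOpenAI

async def call_llm(messages: list, backend: str = "OPENAI",
                   temperature: float = 0.7, max_tokens: int = 2048) -> str:
    """Hypothetical messages-based wrapper matching the run_simple() call site."""
    if backend.upper() == "OLLAMA":
        # Ollama serves an OpenAI-compatible API under /v1; URL and model assumed.
        client = AsyncOpenAI(
            base_url=os.getenv("OLLAMA_URL", "http://localhost:11434/v1"),
            api_key="ollama",  # Ollama ignores the key, but the client requires one
        )
        model = os.getenv("OLLAMA_MODEL", "llama3")
    else:
        client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
        model = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
    resp = await client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return resp.choices[0].message.content or ""

Handing the backend the structured list lets it apply its own chat template, which is why the hand-rolled "User: ... / Assistant: " conversation string could be deleted in this hunk.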