# ============================================================================
# CORTEX LOGGING CONFIGURATION
# ============================================================================
# This file contains all logging-related environment variables for the
# Cortex reasoning pipeline. Copy this to your .env file and adjust as needed.
#
# Log Detail Levels:
#   minimal  - Only errors and critical events
#   summary  - Stage completion + errors (DEFAULT - RECOMMENDED FOR PRODUCTION)
#   detailed - Include raw LLM outputs, RAG results, timing breakdowns
#   verbose  - Everything including intermediate states, full JSON dumps
#
# Quick Start:
#   - For debugging weak links: LOG_DETAIL_LEVEL=detailed
#   - For finding performance bottlenecks: LOG_DETAIL_LEVEL=detailed + VERBOSE_DEBUG=true
#   - For production: LOG_DETAIL_LEVEL=summary
#   - For silent mode: LOG_DETAIL_LEVEL=minimal
# ============================================================================

# -----------------------------
|
|
# Primary Logging Level
|
|
# -----------------------------
|
|
# Controls overall verbosity across all components
|
|
LOG_DETAIL_LEVEL=detailed
|
|
|
|
# Legacy verbose debug flag (kept for compatibility)
|
|
# When true, enables maximum logging including raw data dumps
|
|
VERBOSE_DEBUG=false
|
|
|
|
# -----------------------------
# LLM Logging
# -----------------------------
# Enable raw LLM response logging (only works with detailed/verbose levels)
# Shows full JSON responses from each LLM backend call
# Set to "true" to see exact LLM outputs for debugging weak links
LOG_RAW_LLM_RESPONSES=true

# -----------------------------
# Context Logging
# -----------------------------
# Show full raw intake data (L1-L30 summaries) in logs
# WARNING: Very verbose, use only for deep debugging
LOG_RAW_CONTEXT_DATA=false

# -----------------------------
# Loop Detection & Protection
# -----------------------------
# Enable duplicate message detection to prevent processing loops
ENABLE_DUPLICATE_DETECTION=true

# Maximum number of messages to keep in session history (prevents unbounded growth)
# Older messages are trimmed automatically
MAX_MESSAGE_HISTORY=100

# Session TTL in hours - sessions inactive longer than this are auto-expired
SESSION_TTL_HOURS=24

# -----------------------------
# NeoMem / RAG Logging
# -----------------------------
# Relevance score threshold for NeoMem results
RELEVANCE_THRESHOLD=0.4

# Enable NeoMem long-term memory retrieval
NEOMEM_ENABLED=false

# -----------------------------
# Autonomous Features
# -----------------------------
# Enable autonomous tool invocation (RAG, WEB, WEATHER, CODEBRAIN)
ENABLE_AUTONOMOUS_TOOLS=true

# Confidence threshold for autonomous tool invocation (0.0 - 1.0)
AUTONOMOUS_TOOL_CONFIDENCE_THRESHOLD=0.6

# Enable proactive monitoring and suggestions
ENABLE_PROACTIVE_MONITORING=true

# Minimum priority for proactive suggestions to be included (0.0 - 1.0)
PROACTIVE_SUGGESTION_MIN_PRIORITY=0.6

# ============================================================================
# EXAMPLE LOGGING OUTPUT AT DIFFERENT LEVELS
# ============================================================================
#
# LOG_DETAIL_LEVEL=summary (RECOMMENDED):
# ────────────────────────────────────────────────────────────────────────────
# ✅ [LLM] PRIMARY | 14:23:45.123 | Reply: Based on your question about...
# 📊 Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 3 results
# 🧠 Monologue | question | Tone: curious
# ✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms
# 📤 Output: 342 characters
# ────────────────────────────────────────────────────────────────────────────
#
# LOG_DETAIL_LEVEL=detailed (FOR DEBUGGING):
# ────────────────────────────────────────────────────────────────────────────
# 🚀 PIPELINE START | Session: abc123 | 14:23:45.123
# 📝 User: What is the meaning of life?
# ────────────────────────────────────────────────────────────────────────────
# 🧠 LLM CALL | Backend: PRIMARY | 14:23:45.234
# ────────────────────────────────────────────────────────────────────────────
# 📝 Prompt: You are Lyra, a thoughtful AI assistant...
# 💬 Reply: Based on philosophical perspectives, the meaning...
# ╭─ RAW RESPONSE ────────────────────────────────────────────────────────────
# │ {
# │   "choices": [
# │     {
# │       "message": {
# │         "content": "Based on philosophical perspectives..."
# │       }
# │     }
# │   ]
# │ }
# ╰───────────────────────────────────────────────────────────────────────────
#
# ✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms
# ⏱️  Stage Timings:
#    context    :  150ms ( 12.0%)
#    identity   :   10ms (  0.8%)
#    monologue  :  200ms ( 16.0%)
#    reasoning  :  450ms ( 36.0%)
#    refinement :  300ms ( 24.0%)
#    persona    :  140ms ( 11.2%)
# ────────────────────────────────────────────────────────────────────────────
#
# LOG_DETAIL_LEVEL=verbose (MAXIMUM DEBUG):
# Same as detailed but includes:
#   - Full 50+ line raw JSON dumps
#   - Complete intake data structures
#   - All intermediate processing states
#   - Detailed traceback on errors
# ============================================================================