env cleanup round 2
This commit is contained in:
87
.env.example
Normal file
87
.env.example
Normal file
@@ -0,0 +1,87 @@
# ====================================
# 🌌 GLOBAL LYRA CONFIG
# ====================================
LOCAL_TZ_LABEL=America/New_York
DEFAULT_SESSION_ID=default


# ====================================
# 🤖 LLM BACKEND OPTIONS
# ====================================
# Services choose which backend to use from these options
# Primary: vLLM on MI50 GPU
LLM_PRIMARY_PROVIDER=vllm
LLM_PRIMARY_URL=http://10.0.0.43:8000
LLM_PRIMARY_MODEL=/model

# Secondary: Ollama on 3090 GPU
LLM_SECONDARY_PROVIDER=ollama
LLM_SECONDARY_URL=http://10.0.0.3:11434
LLM_SECONDARY_MODEL=qwen2.5:7b-instruct-q4_K_M

# Cloud: OpenAI
LLM_CLOUD_PROVIDER=openai_chat
LLM_CLOUD_URL=https://api.openai.com/v1
LLM_CLOUD_MODEL=gpt-4o-mini
OPENAI_API_KEY=sk-proj-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

# Local Fallback: llama.cpp or LM Studio
LLM_FALLBACK_PROVIDER=openai_completions
LLM_FALLBACK_URL=http://10.0.0.41:11435
LLM_FALLBACK_MODEL=llama-3.2-8b-instruct

# Global LLM controls
LLM_TEMPERATURE=0.7


# ====================================
# 🗄️ DATABASE CONFIGURATION
# ====================================
# Postgres (pgvector for NeoMem)
POSTGRES_USER=neomem
POSTGRES_PASSWORD=change_me_in_production
POSTGRES_DB=neomem
POSTGRES_HOST=neomem-postgres
POSTGRES_PORT=5432

# Neo4j Graph Database
NEO4J_URI=bolt://neomem-neo4j:7687
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=change_me_in_production
NEO4J_AUTH=neo4j/change_me_in_production


# ====================================
# 🧠 MEMORY SERVICES (NEOMEM)
# ====================================
NEOMEM_API=http://neomem-api:7077
NEOMEM_API_KEY=generate_secure_random_token_here
NEOMEM_HISTORY_DB=postgresql://neomem:change_me_in_production@neomem-postgres:5432/neomem

# Embeddings configuration (used by NeoMem)
EMBEDDER_PROVIDER=openai
EMBEDDER_MODEL=text-embedding-3-small


# ====================================
# 🔌 INTERNAL SERVICE URLS
# ====================================
# Using container names for Docker network communication
INTAKE_API_URL=http://intake:7080
CORTEX_API=http://cortex:7081
CORTEX_URL=http://cortex:7081/reflect
CORTEX_URL_INGEST=http://cortex:7081/ingest
RAG_API_URL=http://rag:7090
RELAY_URL=http://relay:7078

# Persona service (optional)
PERSONA_URL=http://persona-sidecar:7080/current


# ====================================
# 🔧 FEATURE FLAGS
# ====================================
CORTEX_ENABLED=true
MEMORY_ENABLED=true
PERSONA_ENABLED=false
DEBUG_PROMPT=true
Reference in New Issue
Block a user